anolis: net: support Linkdata ethernet controllers

ANBZ: #20924

Add driver support for Linkdata ethernet controllers (sxe PF and sxevf VF modules).

Signed-off-by: liujie_answer <liujie5@linkdatatechnology.com>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/5302
Author: liujie_answer, 2025-05-23 17:05:58 +08:00 (committed by 小龙)
Parent: 4e26f60571
Commit: f55326b5b3

114 changed files with 52887 additions and 0 deletions

@@ -0,0 +1 @@
CONFIG_NET_VENDOR_LINKDATA=y

@@ -0,0 +1 @@
CONFIG_SXE=m

@@ -0,0 +1 @@
CONFIG_SXE_VF=m

@@ -84,6 +84,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/linkdata/Kconfig"
source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
source "drivers/net/ethernet/yunsilicon/Kconfig"

@@ -46,6 +46,7 @@ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_LINKDATA) += linkdata/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_NET_VENDOR_YUNSILICON) += yunsilicon/
obj-$(CONFIG_JME) += jme.o

@@ -0,0 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
#
# Linkdata network device configuration
#
config NET_VENDOR_LINKDATA
bool "Linkdata devices"
default y
help
If you have a network (Ethernet) card from Linkdata, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Linkdata cards. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_LINKDATA
source "drivers/net/ethernet/linkdata/sxe/Kconfig"
source "drivers/net/ethernet/linkdata/sxevf/Kconfig"
endif # NET_VENDOR_LINKDATA

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linkdata network device drivers.
#
obj-$(CONFIG_SXE) += sxe/
obj-$(CONFIG_SXE_VF) += sxevf/

@@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
#
# sxe network device configuration
#
config SXE
tristate "sxe PCI Express adapters support"
depends on (X86 || ARM64) && PCI
select MDIO
select PHYLIB
select PTP_1588_CLOCK
default m
help
This driver supports the Linkdata sxe family of PCI Express adapters.
To compile this driver as a module, choose M here. The module
will be called sxe.

@@ -0,0 +1,88 @@
# SPDX-License-Identifier: GPL-2.0
CONFIG_MODULE_SIG=n
# Makefile path
MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST)))
# current path
CURDIR :=$(shell dirname $(MAKEPATH))
ifneq ($(KERNELRELEASE),)
# compile
CONFIG_SXE ?= m
obj-$(CONFIG_SXE) += sxe.o
sxe-objs += base/log/sxe_log.o \
base/trace/sxe_trace.o \
sxepf/sxe_xdp.o \
sxepf/sxe_csum.o \
sxepf/sxe_dcb.o \
sxepf/sxe_dcb_nl.o \
sxepf/sxe_debug.o \
sxepf/sxe_debugfs.o \
sxepf/sxe_ethtool.o \
sxepf/sxe_filter.o \
sxepf/sxe_host_cli.o \
sxepf/sxe_host_hdc.o \
sxepf/sxe_hw.o \
sxepf/sxe_ipsec.o \
sxepf/sxe_irq.o \
sxepf/sxe_main.o \
sxepf/sxe_monitor.o \
sxepf/sxe_netdev.o \
sxepf/sxe_pci.o \
sxepf/sxe_phy.o \
sxepf/sxe_ptp.o \
sxepf/sxe_ring.o \
sxepf/sxe_rx_proc.o \
sxepf/sxe_sriov.o \
sxepf/sxe_tx_proc.o
# add compile ccflags and macro
ccflags-y += -Werror
ccflags-y += -I$(CURDIR)/sxepf
ccflags-y += -I$(CURDIR)/include/sxe
ccflags-y += -I$(CURDIR)/include
ccflags-y += -I$(CURDIR)/base/compat
ccflags-y += -I$(CURDIR)/base/trace
ccflags-y += -I$(CURDIR)/base/log
ccflags-y += -DSXE_HOST_DRIVER
ccflags-y += -DSXE_DRIVER_RELEASE
ccflags-$(CONFIG_DCB) += -DSXE_DCB_CONFIGURE
ifeq ($(CONFIG_SXE), y)
ifeq ($(CONFIG_DCA), y)
ccflags-y += -DSXE_TPH_CONFIGURE
endif
else
ifneq ($(CONFIG_DCA), )
ccflags-y += -DSXE_TPH_CONFIGURE
endif
endif
os_type = $(shell sed -n '/^ID=/p' /etc/os-release | awk -F '=' '{print $$2}' | sed 's/\"//g' | sed 's/ID=//g')
ifeq (${os_type}, opencloudos)
ccflags-y += -DOPENCLOUDOS
endif
# get linux kernel version code
ifneq ($(wildcard $(CURDIR)/vercode_build.sh),)
KER_DIR=$(srctree)
SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR))
ifneq ($(SPECIFIC_LINUX),)
ccflags-y += -DSPECIFIC_LINUX
ccflags-y += -D$(SPECIFIC_LINUX)
endif
endif
else # KERNELRELEASE
# kernel build path
KDIR := /lib/modules/$(shell uname -r)/build
all:
@$(MAKE) -C $(KDIR) M=$(CURDIR) modules
clean:
@rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt
@rm -rf ./sxepf/*.o ./sxepf/.*.cmd
@rm -rf ./base/log/*.o ./base/trace/*.o
endif # KERNELRELEASE

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_compat.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_COMPAT_H__
#define __SXE_COMPAT_H__
#include "sxe_compat_gcc.h"
#include <linux/filter.h>
#include <linux/version.h>
#define HAVE_XDP_SUPPORT
#define HAVE_AF_XDP_ZERO_COPY
#define HAVE_MEM_TYPE_XSK_BUFF_POOL
#define HAVE_XDP_BUFF_DATA_META
#define HAVE_XDP_BUFF_FRAME_SIZE
#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS
#define XDP_XMIT_FRAME_FAILED_NEED_FREE
#define HAVE_NETDEV_BPF_XSK_BUFF_POOL
#define HAVE_SKB_XMIT_MORE
#define HAVE_TIMEOUT_TXQUEUE_IDX
#define HAVE_NETDEV_NESTED_PRIV
#define HAVE_NET_PREFETCH_API
#define HAVE_NDO_FDB_ADD_EXTACK
#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
#define HAVE_NDO_SET_VF_LINK_STATE
#define HAVE_NDO_XSK_WAKEUP
#define HAVE_MACVLAN_OFFLOAD_SUPPORT
#define HAVE_PTP_CLOCK_INFO_ADJFINE
#define SXE_LOG_OLD_FS
#define SXE_LOG_FS_NOTIFY
#endif

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_compat_gcc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_COMPAT_GCC_H__
#define __SXE_COMPAT_GCC_H__
#ifdef __has_attribute
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough \
do { \
} while (0)
#endif
#else
#define fallthrough \
do { \
} while (0)
#endif
#endif
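
For context, a minimal sketch (hypothetical function and values, not from the driver) of how this compat fallthrough macro is consumed inside a switch; sxe_csum.c later in this series uses it the same way:

static int sxe_demo_speed_to_mbps(int speed_code)
{
	int mbps = 0;

	switch (speed_code) {
	case 2:			/* 10G: accumulate through the cases below */
		mbps += 9000;
		fallthrough;
	case 1:			/* 1G */
		mbps += 900;
		fallthrough;
	case 0:			/* 100M base */
		mbps += 100;
		break;
	default:
		mbps = -1;	/* unknown speed code */
	}

	return mbps;
}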

File diff suppressed because it is too large.

@@ -0,0 +1,200 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_log.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef _SXE_LOG_H_
#define _SXE_LOG_H_
#include <linux/stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define LOG_INFO_PREFIX_LEN 32
#define LOG_ERROR_PREFIX_LEN 33
#define MEGABYTE 20
enum debug_level {
LEVEL_ERROR,
LEVEL_WARN,
LEVEL_INFO,
LEVEL_DEBUG,
};
static inline const s8 *sxe_debug_level_name(enum debug_level lv)
{
static const s8 *level[] = {
[LEVEL_ERROR] = "ERROR",
[LEVEL_WARN] = "WARN",
[LEVEL_INFO] = "INFO",
[LEVEL_DEBUG] = "DEBUG",
};
return level[lv];
}
#define LOG_BUG_ON(cond, fmt, ...) \
do { \
if ((cond)) { \
LOG_ERROR(fmt, ##__VA_ARGS__); \
LOG_SYNC(); \
BUG(); \
} \
} while (0)
#define DEBUG_TRACE_MAGIC 0x456789
#define BUF_SIZE (1024LL << 10)
#define PAGE_ORDER 2
#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << 2))
#define LOG_FILE_SIZE (200LL << 20)
#define BINARY_FILE_SIZE (200LL << 20)
#define VF_LOG_FILE_PATH "/var/log/sxevf.log"
#define VF_LOG_FILE_PREFIX "sxevf.log"
#define VF_BINARY_FILE_PATH "/var/log/sxevf.bin"
#define VF_BINARY_FILE_PREFIX "sxevf.bin"
#define LOG_FILE_PATH "/var/log/sxe.log"
#define LOG_FILE_PREFIX "sxe.log"
#define BINARY_FILE_PATH "/var/log/sxe.bin"
#define BINARY_FILE_PREFIX "sxe.bin"
#define DEBUG_DROP_LOG_STRING "\nwarning: dropped some logs\n\n"
enum {
DEBUG_TYPE_STRING,
DEBUG_TYPE_BINARY,
DEBUG_TYPE_NR,
};
struct debug_func {
struct list_head list;
char name[64];
};
struct debug_file {
struct list_head list;
char name[64];
};
struct sxe_log {
struct {
char *buf;
int buf_size;
long long head;
long long tail;
/* in order to protect the data */
spinlock_t lock;
unsigned char is_drop;
};
struct {
char *file_path;
struct file *file;
long long file_pos;
long long file_size;
u32 file_num;
u32 index;
};
};
struct sxe_thread_local {
s32 magic;
char data[0];
};
struct sxe_ctxt {
struct page *page;
void *buff;
};
struct sxe_thread_key {
s32 offset;
};
struct sxe_debug {
enum debug_level level;
bool status;
u16 key_offset;
struct sxe_ctxt __percpu *ctxt;
struct list_head filter_func;
struct list_head filter_file;
struct task_struct *task;
struct sxe_log log[DEBUG_TYPE_NR];
};
void sxe_level_set(int level);
s32 sxe_level_get(void);
void sxe_bin_status_set(bool status);
s32 sxe_bin_status_get(void);
int sxe_log_init(bool is_vf);
void sxe_log_exit(void);
void sxe_log_binary(const char *file, const char *func, int line, u8 *ptr,
u64 addr, u32 size, char *str);
void sxe_log_sync(void);
#define LOG_DEBUG(fmt, ...)
#define LOG_INFO(fmt, ...)
#define LOG_WARN(fmt, ...)
#define LOG_ERROR(fmt, ...)
#define UNUSED(x) ((void)(x))
#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter)
#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter)
#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter)
#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter)
#define LOG_DEV_DEBUG(format, arg...) \
dev_dbg(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_INFO(format, arg...) \
dev_info(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_WARN(format, arg...) \
dev_warn(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_ERR(format, arg...) dev_err(&adapter->pdev->dev, format, ##arg)
#define LOG_MSG_DEBUG(msglvl, format, arg...) \
netif_dbg(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_INFO(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_WARN(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_ERR(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: " format, ##arg)
#define LOG_PR_INFO(format, arg...) pr_info("sxe: " format, ##arg)
#define LOG_PR_WARN(format, arg...) pr_warn("sxe: " format, ##arg)
#define LOG_PR_ERR(format, arg...) pr_err("sxe: " format, ##arg)
#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: " format, ##arg)
#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: " format, ##arg)
#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: " format, ##arg)
#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: " format, ##arg)
#define LOG_SYNC()
#define SXE_BUG()
#define SXE_BUG_NO_SYNC()
#ifdef __cplusplus
}
#endif
#endif
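
One non-obvious property of this header: the LOG_DEV_*, LOG_MSG_*, and *_BDF macros expand against a local variable literally named adapter, so every caller needs a struct sxe_adapter *adapter in scope. A hypothetical call-site sketch (kernel context assumed, not stand-alone):

static void sxe_demo_log(struct sxe_adapter *adapter)
{
	/* expands to dev_info(&adapter->pdev->dev, ...) */
	LOG_DEV_INFO("link is up\n");

	/* expands to netif_err(adapter, drv, adapter->netdev, ...) */
	LOG_MSG_ERR(drv, "reset failed on ring %u\n", 0U);
}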

@@ -0,0 +1,251 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_trace.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifdef SXE_DRIVER_TRACE
#include <linux/device.h>
#include <linux/netdevice.h>
#include "sxe_trace.h"
#include "sxe_ring.h"
#include "sxe_log.h"
#define SXE_FILE_NAME_LEN (256)
#define SXE_TRACE_NS_MASK (0xFFFFFFFF)
#define SXE_TRACE_BUF_CLEAN(buf, buf_size, len) \
do { \
memset(buf, 0, buf_size); \
len = 0; \
} while (0)
struct sxe_trace_tx_ring g_sxe_trace_tx[SXE_TXRX_RING_NUM_MAX] = { { 0 } };
struct sxe_trace_rx_ring g_sxe_trace_rx[SXE_TXRX_RING_NUM_MAX] = { { 0 } };
void sxe_file_close(struct file **file)
{
filp_close(*file, NULL);
*file = NULL;
}
void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab)
{
if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) ||
unlikely(lab >= SXE_TRACE_LAB_TX_MAX))
return;
if (unlikely(lab == 0)) {
g_sxe_trace_tx[ring_idx].next++;
g_sxe_trace_tx[ring_idx].next &= SXE_TRACE_PER_RING_MASK;
memset(&g_sxe_trace_tx[ring_idx]
.timestamp[g_sxe_trace_tx[ring_idx].next],
0, sizeof(g_sxe_trace_tx[ring_idx].timestamp[0]));
}
g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next][lab] =
ktime_get_real_ns() & SXE_TRACE_NS_MASK;
}
void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab)
{
if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) ||
unlikely(lab >= SXE_TRACE_LAB_RX_MAX))
return;
if (unlikely(lab == 0)) {
g_sxe_trace_rx[ring_idx].next++;
g_sxe_trace_rx[ring_idx].next &= SXE_TRACE_PER_RING_MASK;
memset(&g_sxe_trace_rx[ring_idx]
.timestamp[g_sxe_trace_rx[ring_idx].next],
0, sizeof(g_sxe_trace_rx[ring_idx].timestamp[0]));
}
g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next][lab] =
ktime_get_real_ns() & SXE_TRACE_NS_MASK;
}
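/*
 * Note (editorial, illustrative): the stored timestamps are
 * ktime_get_real_ns() & SXE_TRACE_NS_MASK, i.e. only the low 32 bits
 * of the nanosecond clock, which wrap every 2^32 ns (~4.29 s). The
 * end - start deltas computed by the dump below are therefore only
 * meaningful when a packet's trace fits inside one wrap period.
 */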
static int sxe_trace_create_file(struct file **pp_file)
{
char file_name[SXE_FILE_NAME_LEN] = {};
int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE;
int len = 0;
int rc = 0;
struct file *file;
len += snprintf(file_name, sizeof(file_name), "%s.",
SXE_TRACE_DUMP_FILE_NAME);
time_for_file_name(file_name + len, sizeof(file_name) - len);
file = filp_open(file_name, flags_new, 0666);
if (IS_ERR(file)) {
rc = (int)PTR_ERR(file);
sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n",
file_name, rc);
goto l_out;
}
*pp_file = file;
l_out:
return rc;
}
static int sxe_trace_write_file(struct file *file)
{
char *buff;
size_t buff_size = 2048;
int rc = 0;
int len = 0;
u64 spend = 0;
u64 times = 0;
u64 spend_total = 0;
u64 times_total = 0;
u64 start;
u64 end;
u32 i;
u32 j;
u32 k;
buff = kzalloc(buff_size, GFP_KERNEL);
if (!buff) {
rc = -ENOMEM;
sxe_print(KERN_ERR, NULL, "kzalloc %zu failed.\n", buff_size);
goto l_out;
}
len += snprintf(buff + len, buff_size - len, "tx trace dump:\n");
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
for (i = 0; i < ARRAY_SIZE(g_sxe_trace_tx); i++) {
spend = 0;
times = 0;
for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) {
start = g_sxe_trace_tx[i]
.timestamp[j][SXE_TRACE_LAB_TX_START];
end = g_sxe_trace_tx[i]
.timestamp[j][SXE_TRACE_LAB_TX_END];
if (start == 0 || end == 0)
continue;
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"\ttx ring %d trace %d dump:", i, j);
for (k = 0; k < SXE_TRACE_LAB_TX_MAX; k++) {
len += snprintf(buff + len, buff_size - len,
"%llu ", g_sxe_trace_tx[i].timestamp[j][k]);
}
len += snprintf(buff + len, buff_size - len,
"spend: %llu\n", end - start);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend += end - start;
times++;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"tx ring %d, spend %llu, times:%llu.\n", i,
spend, times);
spend_total += spend;
times_total += times;
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"tx trace dump, spend_total: %llu, times_total: %llu.\n",
spend_total, times_total);
len += snprintf(buff + len, buff_size - len, "rx trace dump:\n");
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend_total = 0;
times_total = 0;
for (i = 0; i < ARRAY_SIZE(g_sxe_trace_rx); i++) {
spend = 0;
times = 0;
for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) {
start = g_sxe_trace_rx[i]
.timestamp[j][SXE_TRACE_LAB_RX_START];
end = g_sxe_trace_rx[i]
.timestamp[j][SXE_TRACE_LAB_RX_END];
if (start == 0 || end == 0)
continue;
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"\trx ring %d trace %d dump:", i, j);
for (k = 0; k < SXE_TRACE_LAB_RX_MAX; k++) {
len += snprintf(buff + len, buff_size - len,
"%llu ", g_sxe_trace_rx[i].timestamp[j][k]);
}
len += snprintf(buff + len, buff_size - len,
"spend: %llu\n", end - start);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend += end - start;
times++;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"rx ring %d, spend %llu, times:%llu:\n", i,
spend, times);
spend_total += spend;
times_total += times;
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"rx trace dump, spend_total: %llu, times_total: %llu.\n",
spend_total, times_total);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
l_out:
kfree(buff);
if (rc < 0)
sxe_print(KERN_ERR, NULL, "write file error %d\n", rc);
return rc;
}
void sxe_trace_dump(void)
{
struct file *file = NULL;
int rc = 0;
rc = sxe_trace_create_file(&file);
if (!file)
goto l_out;
rc = sxe_trace_write_file(file);
if (rc < 0)
goto l_out;
l_out:
if (file)
sxe_file_close(&file);
}
#endif

@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_trace.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_TRACE_H__
#define __SXE_TRACE_H__
#ifdef SXE_DRIVER_TRACE
#define SXE_TRACE_NUM_PER_RING (2048)
#define SXE_TRACE_PER_RING_MASK (0x7FF)
#ifndef SXE_TEST
#define SXE_TRACE_DUMP_FILE_NAME ("/var/log/sxe_trace_dump.log")
#else
#define SXE_TRACE_DUMP_FILE_NAME (".sxe_trace_dump.log")
#endif
enum sxe_trace_lab_tx {
SXE_TRACE_LAB_TX_START = 0,
SXE_TRACE_LAB_TX_MAY_STOP,
SXE_TRACE_LAB_TX_VLAN,
SXE_TRACE_LAB_TX_DCB,
SXE_TRACE_LAB_TX_IPSEC,
SXE_TRACE_LAB_TX_TSO,
SXE_TRACE_LAB_TX_DESC,
SXE_TRACE_LAB_TX_PPT,
SXE_TRACE_LAB_TX_FDIR,
SXE_TRACE_LAB_TX_OL_INFO,
SXE_TRACE_LAB_TX_MAP,
SXE_TRACE_LAB_TX_SENT,
SXE_TRACE_LAB_TX_UPDATE,
SXE_TRACE_LAB_TX_MAY_STOP_2,
SXE_TRACE_LAB_TX_WRITE,
SXE_TRACE_LAB_TX_END,
SXE_TRACE_LAB_TX_MAX,
};
struct sxe_trace_tx_ring {
u64 next;
u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_TX_MAX];
};
enum sxe_trace_lab_rx {
SXE_TRACE_LAB_RX_START = 0,
SXE_TRACE_LAB_RX_CLEAN,
SXE_TRACE_LAB_RX_UNMAP,
SXE_TRACE_LAB_RX_STATS,
SXE_TRACE_LAB_RX_HANG,
SXE_TRACE_LAB_RX_DONE,
SXE_TRACE_LAB_RX_WAKE,
SXE_TRACE_LAB_RX_END,
SXE_TRACE_LAB_RX_MAX,
};
struct sxe_trace_rx_ring {
u64 next;
u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_RX_MAX];
};
void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab);
void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab);
void sxe_trace_dump(void);
#define SXE_TRACE_TX(r_idx, lab) sxe_trace_tx_add(r_idx, lab)
#define SXE_TRACE_RX(r_idx, lab) sxe_trace_rx_add(r_idx, lab)
#else
#define SXE_TRACE_TX(r_idx, lab)
#define SXE_TRACE_RX(r_idx, lab)
#endif
#endif
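
For illustration, a hypothetical transmit path bracketed by the trace labels above; the call sites are assumptions (the real ones live in the sxepf tx code), and both SXE_TRACE_TX() invocations compile away when SXE_DRIVER_TRACE is unset:

static void sxe_demo_xmit(u8 ring_idx)
{
	/* lab == SXE_TRACE_LAB_TX_START (0) advances to a fresh slot */
	SXE_TRACE_TX(ring_idx, SXE_TRACE_LAB_TX_START);

	/* ... build descriptors, map DMA, write the tail register ... */

	SXE_TRACE_TX(ring_idx, SXE_TRACE_LAB_TX_END);
}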

@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: drv_msg.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __DRV_MSG_H__
#define __DRV_MSG_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_VERSION_LEN 32
struct sxe_version_resp {
u8 fw_version[SXE_VERSION_LEN];
};
#endif

@@ -0,0 +1,224 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_cli.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_CLI_H__
#define __SXE_CLI_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_VERION_LEN (32)
#define SXE_MAC_NUM (128)
#define SXE_PORT_TRANSCEIVER_LEN (32)
#define SXE_PORT_VENDOR_LEN (32)
#define SXE_CHIP_TYPE_LEN (32)
#define SXE_VPD_SN_LEN (16)
#define SXE_SOC_RST_TIME (0x93A80)
#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)
#define MGC_TERMLOG_INFO_MAX_LEN (12 * 1024)
#define SXE_REGS_DUMP_MAX_LEN (12 * 1024)
#define SXE_PRODUCT_NAME_LEN (32)
enum sxe_led_mode {
SXE_IDENTIFY_LED_BLINK_ON = 0,
SXE_IDENTIFY_LED_BLINK_OFF,
SXE_IDENTIFY_LED_ON,
SXE_IDENTIFY_LED_OFF,
SXE_IDENTIFY_LED_RESET,
};
struct sxe_led_ctrl {
u32 mode;
u32 duration;
};
struct sxe_led_ctrl_resp {
u32 ack;
};
enum portlinkspeed {
PORT_LINK_NO = 0,
PORT_LINK_100M = 1,
PORT_LINK_1G = 2,
PORT_LINK_10G = 3,
};
struct syssocinfo {
s8 fwver[SXE_VERION_LEN];
s8 optver[SXE_VERION_LEN];
u8 socstatus;
u8 pad[3];
s32 soctemp;
u64 chipid;
s8 chiptype[SXE_CHIP_TYPE_LEN];
s8 pba[SXE_VPD_SN_LEN];
s8 productname[SXE_PRODUCT_NAME_LEN];
};
struct sysportinfo {
u64 mac[SXE_MAC_NUM];
u8 isportabs;
u8 linkstat;
u8 linkspeed;
u8 issfp : 1;
u8 isgetinfo : 1;
u8 rvd : 6;
s8 opticalmodtemp;
u8 pad[3];
s8 transceivertype[SXE_PORT_TRANSCEIVER_LEN];
s8 vendorname[SXE_PORT_VENDOR_LEN];
s8 vendorpn[SXE_PORT_VENDOR_LEN];
};
struct sysinforesp {
struct syssocinfo socinfo;
struct sysportinfo portinfo;
};
enum sfptemptdmode {
SFP_TEMP_THRESHOLD_MODE_ALARM = 0,
SFP_TEMP_THRESHOLD_MODE_WARN,
};
struct sfptemptdset {
u8 mode;
u8 pad[3];
s8 hthreshold;
s8 lthreshold;
};
struct sxelogexportresp {
u16 curloglen;
u8 isend;
u8 pad;
s32 sessionid;
s8 data[0];
};
enum sxelogexporttype {
SXE_LOG_EXPORT_REQ = 0,
SXE_LOG_EXPORT_FIN,
SXE_LOG_EXPORT_ABORT,
};
struct sxelogexportreq {
u8 isalllog;
u8 cmdtype;
u8 isbegin;
u8 pad;
s32 sessionid;
u32 loglen;
};
struct socrstreq {
u32 time;
};
struct regsdumpresp {
u32 curdwlen;
u8 data[0];
};
enum {
SXE_MFG_PART_NUMBER_LEN = 8,
SXE_MFG_SERIAL_NUMBER_LEN = 16,
SXE_MFG_REVISION_LEN = 4,
SXE_MFG_OEM_STR_LEN = 64,
SXE_MFG_SXE_BOARD_ASSEMBLY_LEN = 32,
SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
SXE_MFG_SXE_MAC_ADDR_CNT = 2,
};
struct sxemfginfo {
u8 partnumber[SXE_MFG_PART_NUMBER_LEN];
u8 serialnumber[SXE_MFG_SERIAL_NUMBER_LEN];
u32 mfgdate;
u8 revision[SXE_MFG_REVISION_LEN];
u32 reworkdate;
u8 pad[4];
u64 macaddr[SXE_MFG_SXE_MAC_ADDR_CNT];
u8 boardtracenum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN];
u8 boardassembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];
u8 extra1[SXE_MFG_OEM_STR_LEN];
u8 extra2[SXE_MFG_OEM_STR_LEN];
};
struct sxelldpinfo {
u8 lldpstate;
u8 pad[3];
};
struct regsdumpreq {
u32 baseaddr;
u32 dwlen;
};
enum sxe_pcs_mode {
SXE_PCS_MODE_1000BASE_KX_WO = 0,
SXE_PCS_MODE_1000BASE_KX_W,
SXE_PCS_MODE_SGMII,
SXE_PCS_MODE_10GBASE_KR_WO,
SXE_PCS_MODE_AUTO_NEGT_73,
SXE_PCS_MODE_LPBK_PHY_TX2RX,
SXE_PCS_MODE_LPBK_PHY_RX2TX,
SXE_PCS_MODE_LPBK_PCS_RX2TX,
SXE_PCS_MODE_BUTT,
};
enum sxe_remote_fault {
SXE_REMOTE_FALUT_NO_ERROR = 0,
SXE_REMOTE_FALUT_OFFLINE,
SXE_REMOTE_FALUT_LINK_FAILURE,
SXE_REMOTE_FALUT_AUTO_NEGOTIATION,
SXE_REMOTE_UNKNOWN,
};
struct sxe_phy_cfg {
enum sxe_pcs_mode mode;
u32 mtu;
};
enum sxe_an_speed {
SXE_AN_SPEED_NO_LINK = 0,
SXE_AN_SPEED_100M,
SXE_AN_SPEED_1G,
SXE_AN_SPEED_10G,
SXE_AN_SPEED_UNKNOWN,
};
enum sxe_phy_pause_cap {
SXE_PAUSE_CAP_NO_PAUSE = 0,
SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,
SXE_PAUSE_CAP_SYMMETRIC_PAUSE,
SXE_PAUSE_CAP_BOTH_PAUSE,
SXE_PAUSE_CAP_UNKNOWN,
};
enum sxe_phy_duplex_type {
SXE_FULL_DUPLEX = 0,
SXE_HALF_DUPLEX = 1,
SXE_UNKNOWN_DUPLEX,
};
struct sxe_phy_an_cap {
enum sxe_remote_fault remote_fault;
enum sxe_phy_pause_cap pause_cap;
enum sxe_phy_duplex_type duplex_cap;
};
struct sxe_an_cap {
struct sxe_phy_an_cap local;
struct sxe_phy_an_cap peer;
};
#endif

View File

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_hdc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_HDC_H__
#define __SXE_HDC_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define HDC_CACHE_TOTAL_LEN (16 * 1024)
#define ONE_PACKET_LEN_MAX (1024)
#define DWORD_NUM (256)
#define HDC_TRANS_RETRY_COUNT (3)
enum sxehdcerrnocode {
PKG_OK = 0,
PKG_ERR_REQ_LEN,
PKG_ERR_RESP_LEN,
PKG_ERR_PKG_SKIP,
PKG_ERR_NODATA,
PKG_ERR_PF_LK,
PKG_ERR_OTHER,
};
union hdcheader {
struct {
u8 pid : 4;
u8 errcode : 4;
u8 len;
u16 startpkg : 1;
u16 endpkg : 1;
u16 isrd : 1;
u16 msi : 1;
u16 totallen : 12;
} head;
u32 dw0;
};
#endif
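
A stand-alone sketch of the hdcheader packing: the 32-bit dw0 member aliases the pid/errcode/len/flag bitfields, so a whole header moves as one word. The field values are hypothetical, and exact bit placement is compiler- and endianness-dependent:

#include <stdio.h>
#include <stdint.h>

union hdcheader_demo {
	struct {
		uint8_t pid : 4;
		uint8_t errcode : 4;
		uint8_t len;
		uint16_t startpkg : 1;
		uint16_t endpkg : 1;
		uint16_t isrd : 1;
		uint16_t msi : 1;
		uint16_t totallen : 12;
	} head;
	uint32_t dw0;
};

int main(void)
{
	union hdcheader_demo h = { .dw0 = 0 };

	h.head.pid = 3;
	h.head.len = 64;	/* this packet's payload length */
	h.head.startpkg = 1;	/* single-packet transaction */
	h.head.endpkg = 1;
	h.head.totallen = 64;

	printf("dw0 = 0x%08x\n", h.dw0);
	return 0;
}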

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ioctl.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef _SXE_IOCTL_H_
#define _SXE_IOCTL_H_
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
struct sxeioctlsynccmd {
u64 traceid;
void *indata;
u32 inlen;
void *outdata;
u32 outlen;
};
#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxeioctlsynccmd)
#endif
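
A hypothetical userspace sketch of driving this sync-command ioctl. Only the ioctl number and the sxeioctlsynccmd layout come from the header above; the device node name and the payload sizes are assumptions:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* userspace mirror of struct sxeioctlsynccmd (64-bit ABI assumed) */
struct sxeioctlsynccmd {
	uint64_t traceid;
	void *indata;
	uint32_t inlen;
	void *outdata;
	uint32_t outlen;
};

#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxeioctlsynccmd)

int main(void)
{
	unsigned char req[16] = { 0 };
	unsigned char resp[64] = { 0 };
	struct sxeioctlsynccmd cmd = {
		.traceid = 1,
		.indata = req,
		.inlen = sizeof(req),
		.outdata = resp,
		.outlen = sizeof(resp),
	};
	int fd = open("/dev/sxe", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SXE_CMD_IOCTL_SYNC_CMD, &cmd) < 0)
		perror("ioctl");
	close(fd);
	return 0;
}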

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_msg.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_MSG_H__
#define __SXE_MSG_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_MAC_ADDR_LEN 6
#define SXE_HDC_CMD_HDR_SIZE sizeof(struct sxe_hdc_cmd_hdr)
#define SXE_HDC_MSG_HDR_SIZE sizeof(struct sxe_hdc_drv_cmd_msg)
enum sxe_cmd_type {
SXE_CMD_TYPE_CLI,
SXE_CMD_TYPE_DRV,
SXE_CMD_TYPE_UNKNOWN,
};
struct sxe_hdc_cmd_hdr {
u8 cmd_type;
u8 cmd_sub_type;
u8 reserve[6];
};
enum sxefwstate {
SXE_FW_START_STATE_UNDEFINED = 0x00,
SXE_FW_START_STATE_INIT_BASE = 0x10,
SXE_FW_START_STATE_SCAN_DEVICE = 0x20,
SXE_FW_START_STATE_FINISHED = 0x30,
SXE_FW_START_STATE_UPGRADE = 0x31,
SXE_FW_RUNNING_STATE_ABNOMAL = 0x40,
SXE_FW_START_STATE_MASK = 0xF0,
};
struct sxefwstateinfo {
u8 socstatus;
char statbuff[32];
};
enum msievt {
MSI_EVT_SOC_STATUS = 0x1,
MSI_EVT_HDC_FWOV = 0x2,
MSI_EVT_HDC_TIME_SYNC = 0x4,
MSI_EVT_MAX = 0x80000000,
};
enum sxefwhdcstate {
SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
SXE_FW_HDC_TRANSACTION_BUSY,
SXE_FW_HDC_TRANSACTION_ERR,
};
enum sxe_hdc_cmd_opcode {
SXE_CMD_SET_WOL = 1,
SXE_CMD_LED_CTRL,
SXE_CMD_SFP_READ,
SXE_CMD_SFP_WRITE,
SXE_CMD_TX_DIS_CTRL = 5,
SXE_CMD_TINE_SYNC,
SXE_CMD_RATE_SELECT,
SXE_CMD_R0_MAC_GET,
SXE_CMD_LOG_EXPORT,
SXE_CMD_FW_VER_GET = 10,
SXE_CMD_PCS_SDS_INIT,
SXE_CMD_AN_SPEED_GET,
SXE_CMD_AN_CAP_GET,
SXE_CMD_GET_SOC_INFO,
SXE_CMD_MNG_RST = 15,
SXE_CMD_MAX,
};
enum sxe_hdc_cmd_errcode {
SXE_ERR_INVALID_PARAM = 1,
};
struct sxe_hdc_drv_cmd_msg {
u16 opcode;
u16 errcode;
union datalength {
u16 req_len;
u16 ack_len;
} length;
u8 reserve[8];
u64 traceid;
u8 body[0];
};
struct sxe_sfp_rw_req {
u16 offset;
u16 len;
u8 write_data[0];
};
struct sxe_sfp_read_resp {
u16 len;
u8 resp[0];
};
enum sxe_sfp_rate {
SXE_SFP_RATE_1G = 0,
SXE_SFP_RATE_10G = 1,
};
struct sxe_sfp_rate_able {
enum sxe_sfp_rate rate;
};
struct sxe_spp_tx_able {
bool isdisable;
};
struct sxe_default_mac_addr_resp {
u8 addr[SXE_MAC_ADDR_LEN];
};
struct sxe_mng_rst {
bool enable;
};
#endif
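
For illustration, a stand-alone sketch of framing one driver command with the sxe_hdc_drv_cmd_msg header and its flexible-array body. Field names and the opcode value come from this header; the helper itself and how the buffer reaches firmware are assumptions:

#include <stdint.h>
#include <stdlib.h>

/* userspace mirror of struct sxe_hdc_drv_cmd_msg above */
struct sxe_hdc_drv_cmd_msg {
	uint16_t opcode;
	uint16_t errcode;
	union {
		uint16_t req_len;
		uint16_t ack_len;
	} length;
	uint8_t reserve[8];
	uint64_t traceid;
	uint8_t body[];
};

/* build a SXE_CMD_FW_VER_GET request; the version query has no body */
static struct sxe_hdc_drv_cmd_msg *sxe_fw_ver_req_build(size_t *total)
{
	struct sxe_hdc_drv_cmd_msg *msg;

	*total = sizeof(*msg);
	msg = calloc(1, *total);
	if (!msg)
		return NULL;

	msg->opcode = 10;		/* SXE_CMD_FW_VER_GET */
	msg->length.req_len = 0;
	msg->traceid = 42;		/* arbitrary correlation id */
	return msg;
}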

File diff suppressed because it is too large.

@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_version.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_VER_H__
#define __SXE_VER_H__
#define SXE_VERSION "1.5.0.30"
#define SXE_COMMIT_ID "cd8fdce"
#define SXE_BRANCH "develop/rc/sagitta-1.5.0_B030-Anolis"
#define SXE_BUILD_TIME "2025-05-13 20:33:58"
#define SXE_DRV_NAME "sxe"
#define SXEVF_DRV_NAME "sxevf"
#define SXE_DRV_LICENSE "GPL v2"
#define SXE_DRV_AUTHOR "sxe"
#define SXEVF_DRV_AUTHOR "sxevf"
#define SXE_DRV_DESCRIPTION "sxe driver"
#define SXEVF_DRV_DESCRIPTION "sxevf driver"
#define SXE_FW_NAME "soc"
#define SXE_FW_ARCH "arm32"
#ifndef PS3_CFG_RELEASE
#define PS3_SXE_FW_BUILD_MODE "debug"
#else
#define PS3_SXE_FW_BUILD_MODE "release"
#endif
#endif

@@ -0,0 +1,414 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_H__
#define __SXE_H__
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include "sxe_log.h"
#include "sxe_hw.h"
#include "sxe_irq.h"
#include "sxe_phy.h"
#include "sxe_monitor.h"
#include "sxe_ipsec.h"
#include "sxe_dcb.h"
#include "sxe_errno.h"
#include "drv_msg.h"
#include "sxe_compat.h"
#define SXE_ETH_DEAD_LOAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)
#define SXE_MAX_JUMBO_FRAME_SIZE 9728
#define DEV_NAME_LEN 16
#define CHAR_BITS (8)
#define SXE_HZ_TRANSTO_MS 1000
#define PCI_BDF_DEV_SHIFT (3)
#define PCI_BDF_DEV_MASK (0x1F)
#define PCI_BDF_FUNC_MASK (0x7)
#ifdef SXE_TEST
#define SXE_MAX_MACVLANS 3
#else
#define SXE_MAX_MACVLANS 63
#endif
/* free and clear the caller's pointer so stale uses see NULL */
#define SXE_KFREE(addr) \
	do { \
		kfree(addr); \
		(addr) = NULL; \
	} while (0)
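/*
 * Usage sketch (editorial): because the macro clears the caller's
 * pointer, an accidental second SXE_KFREE() on the same variable
 * degrades to a harmless kfree(NULL):
 *
 *	u32 *key = kzalloc(40, GFP_KERNEL);
 *	...
 *	SXE_KFREE(key);		// key == NULL afterwards
 */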
enum adapter_cap {
SXE_DCB_ENABLE = BIT(0),
SXE_SRIOV_ENABLE = BIT(1),
SXE_FNAV_SAMPLE_ENABLE = BIT(2),
SXE_FNAV_SPECIFIC_ENABLE = BIT(3),
SXE_SRIOV_DCB_ENABLE = (SXE_DCB_ENABLE | SXE_SRIOV_ENABLE),
SXE_LRO_ENABLE = BIT(4),
SXE_RSS_FIELD_IPV4_UDP = BIT(5),
SXE_RSS_FIELD_IPV6_UDP = BIT(6),
SXE_RX_LEGACY = BIT(7),
SXE_RX_HWTSTAMP_ENABLED = BIT(8),
SXE_MSI_ENABLED = BIT(9),
SXE_MSIX_ENABLED = BIT(10),
SXE_VLAN_PROMISC = BIT(11),
SXE_LRO_CAPABLE = BIT(12),
SXE_RSS_ENABLE = BIT(13),
SXE_MACVLAN_ENABLE = BIT(14),
SXE_1588V2_ONE_STEP = BIT(15),
SXE_PTP_PPS_ENABLED = BIT(16),
SXE_RX_HWTSTAMP_IN_REGISTER = BIT(17),
SXE_TPH_CAPABLE = BIT(18),
SXE_TPH_ENABLE = BIT(19),
#ifdef SXE_IPSEC_CONFIGURE
SXE_IPSEC_ENABLED = BIT(20),
SXE_VF_IPSEC_ENABLED = BIT(21),
#endif
};
enum sxe_nic_state {
SXE_RESETTING,
SXE_TESTING,
SXE_DOWN,
SXE_DISABLED,
SXE_REMOVING,
SXE_PTP_RUNNING,
SXE_PTP_TX_IN_PROGRESS,
SXE_IN_SFP_INIT,
SXE_SFP_MULTI_SPEED_SETTING,
};
struct sxe_sw_stats {
u64 tx_busy;
u64 non_eop_descs;
u64 lro_total_count;
u64 lro_total_flush;
u64 fnav_overflow;
u64 reset_work_trigger_cnt;
u64 restart_queue;
u64 hw_csum_rx_error;
u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 tx_hwtstamp_timeouts;
u64 tx_hwtstamp_skipped;
u64 rx_hwtstamp_cleared;
u64 tx_ipsec;
u64 rx_ipsec;
u64 link_state_change_cnt;
};
struct sxe_stats_info {
struct sxe_sw_stats sw;
struct sxe_mac_stats hw;
/* in order to protect the data */
struct mutex stats_mutex;
};
struct sxe_macvlan {
struct net_device *netdev;
u32 tx_ring_offset;
u32 rx_ring_offset;
s32 pool;
};
struct sxe_fnav_rule_node {
struct hlist_node node;
union sxe_fnav_rule_info rule_info;
u16 sw_idx;
u64 ring_cookie;
};
struct sxe_fnav_context {
u32 rules_table_size;
u32 sample_rate;
/* in order to protect the data */
spinlock_t sample_lock;
u32 sample_rules_cnt;
time64_t fdir_overflow_time;
bool is_sample_table_overflowed;
DECLARE_HASHTABLE(sample_list, 13);
/* in order to protect the data */
spinlock_t specific_lock;
u32 rule_cnt;
struct hlist_head rules_list;
union sxe_fnav_rule_info rules_mask;
};
struct sxe_vf_uc_addr_list {
struct list_head list;
u8 vf_idx;
bool free;
bool is_macvlan;
u8 uc_addr[ETH_ALEN];
};
struct sxe_vf_info {
u8 mac_addr[ETH_ALEN];
u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX];
u8 mc_hash_used;
u16 pf_vlan;
u16 pf_qos;
u8 cast_mode;
u8 trusted : 1;
u8 is_ready : 1;
u8 spoof_chk_enabled : 1;
u8 rss_query_enabled : 1;
u8 mac_from_pf : 1;
u8 need_mpe : 1;
u8 reserved : 2;
u16 tx_rate;
s32 link_enable;
#ifdef HAVE_NDO_SET_VF_LINK_STATE
s32 link_state;
#endif
struct pci_dev *vf_dev;
u32 mbx_version;
};
struct sxe_virtual_context {
u8 num_vfs;
u8 num_mode_promisc;
u8 num_allmulti_or_promisc;
u16 bridge_mode;
u32 mbps_link_speed;
bool is_rate_set;
struct sxe_vf_uc_addr_list head;
struct sxe_vf_uc_addr_list *vf_uc_list;
struct sxe_vf_info *vf_info;
u32 err_refcount;
/* in order to protect the data */
spinlock_t vfs_lock;
DECLARE_BITMAP(pf_pool_bitmap, SXE_MAX_MACVLANS + 1);
};
struct sxe_ptp_context {
struct cyclecounter hw_cc;
struct timecounter hw_tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
/* in order to protect the data */
spinlock_t ptp_timer_lock;
void (*ptp_setup_spp)(struct sxe_adapter *adapter);
u32 tx_hwtstamp_sec;
u32 tx_hwtstamp_nsec;
};
struct sxe_dcb_context {
#ifdef SXE_DCB_CONFIGURE
struct ieee_pfc *ieee_pfc;
struct ieee_ets *ieee_ets;
#endif
struct sxe_dcb_cee_config cee_cfg;
struct sxe_dcb_cee_config cee_temp_cfg;
u8 cee_cfg_bitmap;
u8 hw_tcs;
u8 dcbx_cap;
u8 default_up;
enum sxe_fc_mode last_lfc_mode;
};
struct sxe_uc_addr_table {
u8 addr[ETH_ALEN];
u16 pool;
unsigned long state;
};
struct sxe_mac_filter_context {
u8 cur_mac_addr[ETH_ALEN];
u8 def_mac_addr[ETH_ALEN];
/* in order to protect the data */
spinlock_t uc_table_lock;
struct sxe_uc_addr_table *uc_addr_table;
u32 mc_hash_table[SXE_MTA_ENTRY_NUM_MAX];
u32 mc_hash_table_used;
};
struct sxe_vlan_context {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 vlan_table_size;
};
struct sxe_cdev_info {
struct cdev cdev;
dev_t dev_no;
struct device *device;
};
struct sxe_self_test_context {
u32 icr;
struct sxe_ring tx_ring;
struct sxe_ring rx_ring;
};
struct sxe_hdc_context {
struct completion sync_done;
struct work_struct time_sync_work;
u16 time_sync_failed;
};
struct sxe_fw_info {
u8 fw_version[SXE_VERSION_LEN];
};
struct sxe_adapter {
char dev_name[DEV_NAME_LEN];
struct net_device *netdev;
struct pci_dev *pdev;
struct sxe_hw hw;
u32 cap;
u32 cap2;
unsigned long state;
struct sxe_link_info link;
u16 msg_enable;
struct sxe_ring_feature ring_f;
struct sxe_pool_feature pool_f;
struct sxe_ring_context rx_ring_ctxt;
struct sxe_ring_context tx_ring_ctxt;
struct sxe_ring_context xdp_ring_ctxt;
struct sxe_monitor_context monitor_ctxt;
struct sxe_irq_context irq_ctxt;
struct sxe_fnav_context fnav_ctxt;
struct sxe_virtual_context vt_ctxt;
#ifdef SXE_IPSEC_CONFIGURE
struct sxe_ipsec_context ipsec;
#endif
struct bpf_prog *xdp_prog;
#ifdef HAVE_AF_XDP_ZERO_COPY
unsigned long *af_xdp_zc_qps;
#endif
u32 *rss_key;
u8 rss_indir_tbl[SXE_MAX_RETA_ENTRIES];
struct sxe_ptp_context ptp_ctxt;
struct sxe_dcb_context dcb_ctxt;
struct sxe_vlan_context vlan_ctxt;
struct sxe_mac_filter_context mac_filter_ctxt;
struct sxe_stats_info stats;
struct sxe_fw_info fw_info;
struct dentry *debugfs_entries;
struct sxe_phy_context phy_ctxt;
struct sxe_self_test_context test_ctxt;
struct sxe_cdev_info cdev_info;
struct sxe_hdc_context hdc_ctxt;
u16 bridge_mode;
#ifdef SXE_WOL_CONFIGURE
u32 wol;
#endif
};
struct sxe_fnav_sample_work_info {
struct work_struct work_st;
struct sxe_adapter *adapter;
u64 hash;
};
struct sxe_fnav_sample_filter {
struct hlist_node hlist;
u32 hash;
};
static inline u8 sxe_dcb_tc_get(struct sxe_adapter *adapter)
{
return adapter->dcb_ctxt.hw_tcs;
}
static inline void sxe_dcb_tc_set(struct sxe_adapter *adapter, u8 tcs)
{
adapter->dcb_ctxt.hw_tcs = tcs;
}
static inline u8 sxe_rxtx_pkt_buf_max(struct sxe_adapter *adapter)
{
return (adapter->cap & SXE_DCB_ENABLE) ? SXE_PKG_BUF_NUM_MAX : 1;
}
struct workqueue_struct *sxe_workqueue_get(void);
void sxe_fw_version_get(struct sxe_adapter *adapter);
s32 sxe_ring_irq_init(struct sxe_adapter *adapter);
void sxe_ring_irq_exit(struct sxe_adapter *adapter);
s32 sxe_hw_reset(struct sxe_adapter *adapter);
void sxe_hw_start(struct sxe_hw *hw);
static inline void stats_lock(struct sxe_adapter *adapter)
{
mutex_lock(&adapter->stats.stats_mutex);
}
static inline void stats_unlock(struct sxe_adapter *adapter)
{
mutex_unlock(&adapter->stats.stats_mutex);
}
static inline void carrier_lock(struct sxe_adapter *adapter)
{
mutex_lock(&adapter->link.carrier_mutex);
}
static inline void carrier_unlock(struct sxe_adapter *adapter)
{
mutex_unlock(&adapter->link.carrier_mutex);
}
void sxe_tph_update(struct sxe_irq_data *irq_data);
void sxe_tph_setup(struct sxe_adapter *adapter);
#endif

@@ -0,0 +1,177 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_csum.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netdev_features.h>
#include "sxe_csum.h"
#include "sxe_ring.h"
#include "sxe_tx_proc.h"
#include "sxe_log.h"
#ifndef HAVE_SKB_CSUM_SCTP_API
static inline bool sxe_is_sctp_ipv4(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
if (protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_SCTP) {
LOG_DEBUG("protocal:%d tx packet type is ipv4 sctp.\n",
protocol);
ret = true;
}
return ret;
}
static inline bool sxe_is_sctp_ipv6(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
u32 offset = skb_checksum_start_offset(skb);
u32 hdr_offset = 0;
ipv6_find_hdr(skb, &hdr_offset, IPPROTO_SCTP, NULL, NULL);
if (protocol == htons(ETH_P_IPV6) && offset == hdr_offset) {
LOG_DEBUG("protocal:%d offset:%d tx packet type is ipv6 sctp.\n",
protocol, offset);
ret = true;
}
return ret;
}
static inline bool sxe_prot_is_sctp(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
if (sxe_is_sctp_ipv4(protocol, skb) ||
sxe_is_sctp_ipv6(protocol, skb)) {
ret = true;
}
return ret;
}
#else
#define sxe_prot_is_sctp(protocol, skb) skb_csum_is_sctp(skb)
#endif
void sxe_tx_csum_offload(struct sxe_ring *tx_ring, struct sxe_tx_buffer *first,
struct sxe_tx_context_desc *ctxt_desc)
{
u16 tucmd;
u16 ip_len;
u16 mac_len;
struct sk_buff *skb = first->skb;
struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev);
LOG_DEBUG_BDF("tx ring[%d] ip_summed:%d\n"
"\tcsum_offset:%d csum_start:%d protocol:%d\n"
"\tnetdev features:0x%llx\n",
tx_ring->idx, skb->ip_summed, skb->csum_offset,
skb->csum_start, skb->protocol,
tx_ring->netdev->features);
if (skb->ip_summed != CHECKSUM_PARTIAL)
goto no_checksum;
switch (skb->csum_offset) {
case SXE_TCP_CSUM_OFFSET:
tucmd = SXE_TX_CTXTD_TUCMD_L4T_TCP;
break;
case SXE_UDP_CSUM_OFFSET:
tucmd = SXE_TX_CTXTD_TUCMD_L4T_UDP;
break;
case SXE_SCTP_CSUM_OFFSET:
if (sxe_prot_is_sctp(first->protocol, skb)) {
tucmd = SXE_TX_CTXTD_TUCMD_L4T_SCTP;
break;
}
fallthrough;
default:
skb_checksum_help(skb);
goto no_checksum;
}
first->tx_features |= SXE_TX_FEATURE_CSUM;
ip_len = skb_checksum_start_offset(skb) - skb_network_offset(skb);
mac_len = skb_network_offset(skb);
sxe_ctxt_desc_tucmd_set(ctxt_desc, tucmd);
sxe_ctxt_desc_iplen_set(ctxt_desc, ip_len);
sxe_ctxt_desc_maclen_set(ctxt_desc, mac_len);
LOG_DEBUG_BDF("tx ring[%d] L3 protocol:%d tucmd:0x%x\n"
"\tiplen:0x%x mac_len:0x%x, tx_features:0x%x\n",
tx_ring->idx, first->protocol, tucmd, ip_len, mac_len,
first->tx_features);
no_checksum:
;
}
void sxe_rx_csum_verify(struct sxe_ring *ring, union sxe_rx_data_desc *desc,
struct sk_buff *skb)
{
#ifndef SXE_DRIVER_RELEASE
__le16 pkt_info = desc->wb.lower.lo_dword.hs_rss.pkt_info;
LOG_DEBUG("rx ring[%d] csum verify ip_summed:%d\n"
"\tcsum_offset:%d csum_start:%d pkt_info:0x%x\n"
"\tnetdev features:0x%llx\n",
ring->idx, skb->ip_summed, skb->csum_offset, skb->csum_start,
pkt_info, ring->netdev->features);
#endif
skb_checksum_none_assert(skb);
if (!(ring->netdev->features & NETIF_F_RXCSUM)) {
LOG_WARN("rx ring[%d] no offload checksum verify.\n",
ring->idx);
goto l_out;
}
if (sxe_status_err_check(desc, SXE_RXD_STAT_IPCS) &&
sxe_status_err_check(desc, SXE_RXDADV_ERR_IPE)) {
ring->rx_stats.csum_err++;
LOG_ERROR("rx ring [%d] ip checksum fail.csum_err:%llu\n",
ring->idx, ring->rx_stats.csum_err);
goto l_out;
}
if (sxe_status_err_check(desc, SXE_RXD_STAT_LB)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
goto l_out;
}
if (!sxe_status_err_check(desc, SXE_RXD_STAT_L4CS)) {
LOG_DEBUG("rx ring[%d] no need verify L4 checksum\n",
ring->idx);
goto l_out;
}
if (sxe_status_err_check(desc, SXE_RXDADV_ERR_L4E)) {
ring->rx_stats.csum_err++;
LOG_ERROR("rx ring[%d] L4 checksum verify error.csum_err:%llu\n",
ring->idx, ring->rx_stats.csum_err);
goto l_out;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
LOG_DEBUG("rx ring[%d] ip_summed:%d sxe hw\n"
"\tverify checksum pass.\n", ring->idx, skb->ip_summed);
l_out:
;
}

@@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_csum.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_CSUM_H__
#define __SXE_CSUM_H__
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "sxe_ipsec.h"
#include "sxe.h"
#ifdef NOT_INCLUDE_SCTP_H
struct sctphdr {
__be16 source;
__be16 dest;
__be32 vtag;
__le32 checksum;
} __packed;
#else
#include <linux/sctp.h>
#endif
#define SXE_TCP_CSUM_OFFSET (offsetof(struct tcphdr, check))
#define SXE_UDP_CSUM_OFFSET (offsetof(struct udphdr, check))
#define SXE_SCTP_CSUM_OFFSET (offsetof(struct sctphdr, checksum))
void sxe_tx_csum_offload(struct sxe_ring *tx_ring, struct sxe_tx_buffer *first,
struct sxe_tx_context_desc *ctxt_desc);
void sxe_rx_csum_verify(struct sxe_ring *ring, union sxe_rx_data_desc *desc,
struct sk_buff *skb);
#endif
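
The TX checksum path in sxe_csum.c dispatches on skb->csum_offset, so these three offsetof() values are effectively the L4-protocol discriminator. A stand-alone check of the expected values (16, 6 and 8 bytes), using a local mirror for SCTP since its header is not in standard userspace includes:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

/* local mirror of the fallback sctphdr defined above */
struct sctphdr_demo {
	uint16_t source;
	uint16_t dest;
	uint32_t vtag;
	uint32_t checksum;
};

int main(void)
{
	printf("tcp  check    at %zu\n", offsetof(struct tcphdr, check));
	printf("udp  check    at %zu\n", offsetof(struct udphdr, check));
	printf("sctp checksum at %zu\n",
	       offsetof(struct sctphdr_demo, checksum));
	return 0;
}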

@@ -0,0 +1,452 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_dcb.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe.h"
#ifdef SXE_DCB_CONFIGURE
#define SXE_TC_BWG_PERCENT_PER_CHAN (12)
void sxe_dcb_init(struct sxe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg;
struct sxe_tc_config *tc;
u32 tc_index;
netdev->dcbnl_ops = &sxe_dcbnl_ops;
cee_cfg->num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
cee_cfg->num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
tc = &cee_cfg->tc_config[tc_index];
tc->channel[DCB_PATH_TX].bwg_id = 0;
tc->channel[DCB_PATH_TX].bwg_percent =
SXE_TC_BWG_PERCENT_PER_CHAN + (tc_index & 1);
tc->channel[DCB_PATH_RX].bwg_id = 0;
tc->channel[DCB_PATH_RX].bwg_percent =
SXE_TC_BWG_PERCENT_PER_CHAN + (tc_index & 1);
tc->pfc_type = pfc_disabled;
}
tc = &cee_cfg->tc_config[0];
tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0xFF;
tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0xFF;
cee_cfg->bwg_link_percent[DCB_PATH_TX][0] = SXE_PERCENT_100;
cee_cfg->bwg_link_percent[DCB_PATH_RX][0] = SXE_PERCENT_100;
cee_cfg->pfc_mode_enable = false;
adapter->dcb_ctxt.cee_cfg_bitmap = 0x00;
adapter->dcb_ctxt.dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
memcpy(&adapter->dcb_ctxt.cee_temp_cfg, cee_cfg,
sizeof(adapter->dcb_ctxt.cee_temp_cfg));
}
static u32 sxe_dcb_min_credit_get(u32 max_frame)
{
return ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM;
}
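/*
 * Worked example (editorial, not from the source): with
 * DCB_CREDIT_QUANTUM = 64 and a 9728-byte jumbo frame,
 * sxe_dcb_min_credit_get(9728) = ((9728 / 2) + 63) / 64 = 76,
 * i.e. half the max frame rounded up to 64-byte credit quanta.
 */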
static u16
sxe_dcb_cee_tc_link_percent_get(struct sxe_dcb_cee_config *cee_config,
u8 direction, u8 tc_index)
{
u8 bw_percent;
u16 link_percentage;
struct sxe_tc_bw_alloc *tc_info;
tc_info = &cee_config->tc_config[tc_index].channel[direction];
link_percentage =
cee_config->bwg_link_percent[direction][tc_info->bwg_id];
bw_percent = tc_info->bwg_percent;
link_percentage = (link_percentage * bw_percent) / SXE_PERCENT_100;
return link_percentage;
}
static u32
sxe_dcb_cee_min_link_percent_get(struct sxe_dcb_cee_config *cee_config,
u8 direction)
{
u8 tc_index;
u16 link_percentage;
u32 min_link_percent = SXE_PERCENT_100;
for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
link_percentage = sxe_dcb_cee_tc_link_percent_get(cee_config,
direction, tc_index);
if (link_percentage && link_percentage < min_link_percent)
min_link_percent = link_percentage;
}
return min_link_percent;
}
s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
struct sxe_dcb_cee_config *cee_config,
u32 max_frame, u8 direction)
{
s32 ret = 0;
struct sxe_adapter *adapter = hw->adapter;
struct sxe_tc_bw_alloc *tc_info;
u32 min_credit;
u32 total_credit;
u32 min_link_percent;
u32 credit_refill;
u32 credit_max;
u16 link_percentage;
u8 tc_index;
if (!cee_config) {
ret = -DCB_ERR_CONFIG;
LOG_ERROR_BDF("config info is NULL\n");
goto l_ret;
}
LOG_DEBUG_BDF("cee_config[%p] input max_frame[%u] direction[%s]\n",
cee_config, max_frame, direction ? "RX" : "TX");
min_credit = sxe_dcb_min_credit_get(max_frame);
LOG_DEBUG_BDF("cee_config[%p] max_frame[%u] got min_credit[%u]\n",
cee_config, max_frame, min_credit);
min_link_percent =
sxe_dcb_cee_min_link_percent_get(cee_config, direction);
LOG_DEBUG_BDF("cee_config[%p] direction[%s] got min_link_percent[%u]\n",
cee_config, direction ? "RX" : "TX", min_link_percent);
total_credit = (min_credit / min_link_percent) + 1;
LOG_DEBUG_BDF("cee_config[%p] total_credit=%u\n", cee_config,
total_credit);
for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
tc_info = &cee_config->tc_config[tc_index].channel[direction];
link_percentage = sxe_dcb_cee_tc_link_percent_get(cee_config,
direction, tc_index);
LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n",
tc_index, tc_info->bwg_percent, link_percentage);
if (tc_info->bwg_percent > 0 && link_percentage == 0)
link_percentage = 1;
tc_info->link_percent = (u8)link_percentage;
credit_refill = min(link_percentage * total_credit,
(u32)MAX_CREDIT_REFILL);
if (credit_refill < min_credit)
credit_refill = min_credit;
tc_info->data_credits_refill = (u16)credit_refill;
LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n", tc_index,
credit_refill);
credit_max = (link_percentage * MAX_CREDIT) / SXE_PERCENT_100;
if (credit_max < min_credit)
credit_max = min_credit;
LOG_DEBUG_BDF("tc[%u] credit_max=%u\n", tc_index, credit_max);
if (direction == DCB_PATH_TX)
cee_config->tc_config[tc_index].desc_credits_max =
(u16)credit_max;
tc_info->data_credits_max = (u16)credit_max;
}
l_ret:
return ret;
}
void sxe_dcb_cee_pfc_parse(struct sxe_dcb_cee_config *cfg, u8 *pfc_en)
{
u32 tc;
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
if (tc_config[tc].pfc_type != pfc_disabled)
*pfc_en |= BIT(tc);
}
LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en);
}
void sxe_dcb_cee_refill_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u16 *refill)
{
u32 tc;
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
refill[tc] =
tc_config[tc].channel[direction].data_credits_refill;
LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]);
}
}
void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_cee_config *cfg,
u16 *max_credits)
{
u32 tc;
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
max_credits[tc] = tc_config[tc].desc_credits_max;
LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]);
}
}
void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *bwgid)
{
u32 tc;
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
bwgid[tc] = tc_config[tc].channel[direction].bwg_id;
LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]);
}
}
void sxe_dcb_cee_prio_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *ptype)
{
u32 tc;
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
ptype[tc] = tc_config[tc].channel[direction].prio_type;
LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]);
}
}
u8 sxe_dcb_cee_get_tc_from_up(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 up)
{
struct sxe_tc_config *tc_config = &cfg->tc_config[0];
u8 prio_mask = BIT(up);
u8 tc = cfg->num_tcs.pg_tcs;
if (!tc)
goto l_ret;
for (tc--; tc; tc--) {
if (prio_mask &
tc_config[tc].channel[direction].up_to_tc_bitmap)
break;
}
l_ret:
LOG_DEBUG("up[%u] to tc[%u]\n", up, tc);
return tc;
}
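/*
 * Worked example (editorial): the scan above walks tc from high to
 * low and falls back to tc 0. With the sxe_dcb_init() defaults only
 * tc 0 has up_to_tc_bitmap = 0xFF, so every user priority maps to
 * tc 0 until a CEE configuration spreads the bitmap across tcs.
 */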
void sxe_dcb_cee_up2tc_map_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *map)
{
u8 up;
for (up = 0; up < MAX_USER_PRIORITY; up++) {
map[up] = sxe_dcb_cee_get_tc_from_up(cfg, direction, up);
LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", up, map[up]);
}
}
static void sxe_dcb_hw_cee_bw_alloc_configure(struct sxe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
hw->dma.ops->dcb_rx_bw_alloc_configure(hw, refill, max, bwg_id, prio_type,
prio_tc, MAX_USER_PRIORITY);
hw->dma.ops->dcb_tx_desc_bw_alloc_configure(hw, refill, max, bwg_id,
prio_type);
hw->dma.ops->dcb_tx_data_bw_alloc_configure(hw, refill, max, bwg_id,
prio_type, prio_tc, MAX_USER_PRIORITY);
}
static void sxe_dcb_hw_cee_non_bw_alloc_configure(struct sxe_hw *hw)
{
hw->dma.ops->dcb_tc_stats_configure(hw);
}
static void sxe_dcb_hw_cee_configure(struct sxe_hw *hw,
struct sxe_dcb_cee_config *dcb_config)
{
u8 ptype[MAX_TRAFFIC_CLASS];
u8 bwgid[MAX_TRAFFIC_CLASS];
u8 prio_tc[MAX_TRAFFIC_CLASS];
u16 refill[MAX_TRAFFIC_CLASS];
u16 max[MAX_TRAFFIC_CLASS];
sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_TX, refill);
sxe_dcb_cee_max_credits_parse(dcb_config, max);
sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_TX, bwgid);
sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_TX, ptype);
sxe_dcb_cee_up2tc_map_parse(dcb_config, DCB_PATH_TX, prio_tc);
sxe_dcb_hw_cee_bw_alloc_configure(hw, refill, max, bwgid, ptype,
prio_tc);
sxe_dcb_hw_cee_non_bw_alloc_configure(hw);
}
static void sxe_dcb_ieee_tc_credits_calculate(u8 *bw, u16 *refill, u16 *max,
u32 max_frame)
{
u16 min_percent = 100;
u32 min_credit, total_credits;
u8 tc_index;
min_credit = sxe_dcb_min_credit_get(max_frame);
LOG_DEBUG("min_credit=%u, max_frame=%u\n", min_credit, max_frame);
for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
if (bw[tc_index] < min_percent && bw[tc_index])
min_percent = bw[tc_index];
}
LOG_DEBUG("min_percent=%u\n", min_percent);
total_credits = (min_credit / min_percent) + 1;
LOG_DEBUG("total_credits=%u\n", total_credits);
for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
u32 val = min(bw[tc_index] * total_credits,
(u32)MAX_CREDIT_REFILL);
if (val < min_credit)
val = min_credit;
refill[tc_index] = val;
LOG_DEBUG("tc[%u] credits_refill=%u\n", tc_index,
refill[tc_index]);
max[tc_index] = bw[tc_index] ? (bw[tc_index] * MAX_CREDIT) /
SXE_PERCENT_100 :
min_credit;
LOG_DEBUG("tc[%u] max_credits=%u\n", tc_index, max[tc_index]);
}
}
void sxe_dcb_hw_ets_configure(struct sxe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
hw->dma.ops->dcb_rx_bw_alloc_configure(hw, refill, max, bwg_id,
prio_type, prio_tc, MAX_USER_PRIORITY);
hw->dma.ops->dcb_tx_desc_bw_alloc_configure(hw, refill, max, bwg_id,
prio_type);
hw->dma.ops->dcb_tx_data_bw_alloc_configure(hw, refill, max, bwg_id,
prio_type, prio_tc, MAX_USER_PRIORITY);
}
s32 sxe_dcb_hw_ieee_ets_configure(struct sxe_hw *hw, struct ieee_ets *ets,
u32 max_frame)
{
u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
u8 prio_type[IEEE_8021QAZ_MAX_TCS];
u8 tc_index;
s32 ret = 0;
struct sxe_adapter *adapter = hw->adapter;
u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
for (tc_index = 0; tc_index < IEEE_8021QAZ_MAX_TCS; tc_index++) {
switch (ets->tc_tsa[tc_index]) {
case IEEE_8021QAZ_TSA_STRICT:
prio_type[tc_index] = 2;
break;
case IEEE_8021QAZ_TSA_ETS:
prio_type[tc_index] = 0;
break;
default:
LOG_ERROR_BDF("unsupport tsa[%u]=%u\n", tc_index,
ets->tc_tsa[tc_index]);
ret = -EINVAL;
goto l_ret;
}
LOG_DEBUG_BDF("tc[%u] prio_type=%u\n", tc_index,
prio_type[tc_index]);
}
sxe_dcb_ieee_tc_credits_calculate(ets->tc_tx_bw, refill, max,
max_frame);
sxe_dcb_hw_ets_configure(hw, refill, max, bwg_id, prio_type,
ets->prio_tc);
l_ret:
return ret;
}
void sxe_dcb_hw_pfc_configure(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
hw->dma.ops->dcb_pfc_configure(hw, pfc_en, prio_tc, MAX_USER_PRIORITY);
}
void sxe_dcb_pfc_configure(struct sxe_adapter *adapter)
{
u8 pfc_en = 0;
u8 prio_tc[MAX_TRAFFIC_CLASS];
struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg;
if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
LOG_DEBUG_BDF("pfc in cee mode\n");
sxe_dcb_cee_pfc_parse(cee_cfg, &pfc_en);
sxe_dcb_cee_up2tc_map_parse(cee_cfg, DCB_PATH_TX, prio_tc);
} else if (adapter->dcb_ctxt.ieee_ets && adapter->dcb_ctxt.ieee_pfc) {
LOG_DEBUG_BDF("pfc in ieee mode\n");
pfc_en = adapter->dcb_ctxt.ieee_pfc->pfc_en;
memcpy(prio_tc, adapter->dcb_ctxt.ieee_ets->prio_tc,
sizeof(prio_tc[0]) * MAX_TRAFFIC_CLASS);
}
if (pfc_en)
sxe_dcb_hw_pfc_configure(&adapter->hw, pfc_en, prio_tc);
}
void sxe_dcb_configure(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
u32 max_frame = adapter->netdev->mtu + SXE_ETH_DEAD_LOAD;
u16 rss = sxe_rss_num_get(adapter);
if (!(adapter->cap & SXE_DCB_ENABLE))
return;
if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
LOG_DEBUG_BDF("dcb in cee mode\n");
sxe_dcb_cee_tc_credits_calculate(hw, &adapter->dcb_ctxt.cee_cfg,
max_frame, DCB_PATH_TX);
sxe_dcb_cee_tc_credits_calculate(hw, &adapter->dcb_ctxt.cee_cfg,
max_frame, DCB_PATH_RX);
sxe_dcb_hw_cee_configure(hw, &adapter->dcb_ctxt.cee_cfg);
} else if (adapter->dcb_ctxt.ieee_ets && adapter->dcb_ctxt.ieee_pfc) {
LOG_DEBUG_BDF("dcb in ieee mode\n");
sxe_dcb_hw_ieee_ets_configure(&adapter->hw,
adapter->dcb_ctxt.ieee_ets, max_frame);
}
hw->dbu.ops->dcb_tc_rss_configure(hw, rss);
}
void sxe_dcb_exit(struct sxe_adapter *adapter)
{
kfree(adapter->dcb_ctxt.ieee_pfc);
kfree(adapter->dcb_ctxt.ieee_ets);
}
#endif

@@ -0,0 +1,134 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_dcb.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_DCB_H__
#define __SXE_DCB_H__
#ifdef SXE_DCB_CONFIGURE
#include <linux/dcbnl.h>
#include "sxe_hw.h"
extern const struct dcbnl_rtnl_ops sxe_dcbnl_ops;
struct sxe_adapter;
#endif
#define SXE_MAX_PACKET_BUFFERS 8
#define MAX_USER_PRIORITY 8
#define MAX_BW_GROUP 8
#define BW_PERCENT 100
enum {
DCB_PATH_TX = 0,
DCB_PATH_RX = 1,
DCB_PATH_NUM = DCB_PATH_RX + 1,
};
#define DCB_ERR_CONFIG 1
#define DCB_ERR_BW_GROUP -3
#define DCB_ERR_TC_BW -4
#define DCB_ERR_LS_GS -5
#define DCB_ERR_LS_BW_NONZERO -6
#define DCB_ERR_LS_BWG_NONZERO -7
#define DCB_ERR_TC_BW_ZERO -8
#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
#define SXE_DCB_MMW_SIZE_DEFAULT 0x04
#define SXE_PERCENT_100 100
#define SXE_DCB_PG_SUPPORT 0x00000001
#define SXE_DCB_PFC_SUPPORT 0x00000002
#define SXE_DCB_BCN_SUPPORT 0x00000004
#define SXE_DCB_UP2TC_SUPPORT 0x00000008
#define SXE_DCB_GSP_SUPPORT 0x00000010
#define SXE_DCB_8_TC_SUPPORT 0x80
#define DCB_CREDIT_QUANTUM 64
#define MAX_CREDIT_REFILL 511
#define DCB_MAX_TSO_SIZE (32 * 1024)
#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE / 64 + 1)
#define MAX_CREDIT 4095
struct sxe_tc_bw_alloc {
u8 bwg_id;
u8 bwg_percent;
u8 link_percent;
u8 up_to_tc_bitmap;
u16 data_credits_refill;
u16 data_credits_max;
enum sxe_strict_prio_type prio_type;
};
enum sxe_dcb_pfc_type {
pfc_disabled = 0,
pfc_enabled_full,
pfc_enabled_tx,
pfc_enabled_rx
};
struct sxe_tc_config {
struct sxe_tc_bw_alloc channel[DCB_PATH_NUM];
enum sxe_dcb_pfc_type pfc_type;
u16 desc_credits_max;
};
struct sxe_dcb_num_tcs {
u8 pg_tcs;
u8 pfc_tcs;
};
struct sxe_dcb_cee_config {
struct sxe_dcb_num_tcs num_tcs;
struct sxe_tc_config tc_config[MAX_TRAFFIC_CLASS];
u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP];
bool pfc_mode_enable;
};
#ifdef SXE_DCB_CONFIGURE
void sxe_dcb_init(struct sxe_adapter *adapter);
void sxe_dcb_configure(struct sxe_adapter *adapter);
u8 sxe_dcb_cee_get_tc_from_up(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 up);
s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
struct sxe_dcb_cee_config *dcb_config,
u32 max_frame, u8 direction);
void sxe_dcb_cee_refill_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u16 *refill);
void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_cee_config *cfg,
u16 *max_credits);
void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *bwgid);
void sxe_dcb_cee_prio_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *ptype);
void sxe_dcb_cee_up2tc_map_parse(struct sxe_dcb_cee_config *cfg, u8 direction,
u8 *map);
void sxe_dcb_cee_pfc_parse(struct sxe_dcb_cee_config *cfg, u8 *pfc_en);
void sxe_dcb_hw_pfc_configure(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc);
void sxe_dcb_hw_ets_configure(struct sxe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *prio_tc);
s32 sxe_dcb_hw_ieee_ets_configure(struct sxe_hw *hw, struct ieee_ets *ets,
u32 max_frame);
void sxe_dcb_pfc_configure(struct sxe_adapter *adapter);
void sxe_dcb_exit(struct sxe_adapter *adapter);
s32 sxe_dcb_tc_validate(struct sxe_adapter *adapter, u8 tc);
s32 sxe_dcb_tc_setup(struct sxe_adapter *adapter, u8 tc);
#endif
void sxe_rx_drop_mode_set(struct sxe_adapter *adapter);
#endif

@@ -0,0 +1,996 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_dcb_nl.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe.h"
#ifdef SXE_DCB_CONFIGURE
#include <linux/dcbnl.h>
#include "sxe_phy.h"
#include "sxe_dcb.h"
#include "sxe_sriov.h"
#include "sxe_netdev.h"
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
#define DCB_HW_CHG_RST 0
#define DCB_NO_HW_CHG 1
#define DCB_HW_CHG 2
s32 sxe_dcb_tc_validate(struct sxe_adapter *adapter, u8 tc)
{
s32 ret = 0;
if (tc > adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs) {
LOG_ERROR_BDF(" tc num [%u] is invalid, max tc num=%u\n", tc,
adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs);
ret = -EINVAL;
}
return ret;
}
static void sxe_prio_tc_map_set(struct sxe_adapter *adapter)
{
u8 prio;
u8 tc = 0;
struct net_device *dev = adapter->netdev;
struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg;
struct ieee_ets *ets = adapter->dcb_ctxt.ieee_ets;
for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)
tc = sxe_dcb_cee_get_tc_from_up(cee_cfg, DCB_PATH_TX,
prio);
else if (ets)
tc = ets->prio_tc[prio];
netdev_set_prio_tc_map(dev, prio, tc);
}
}
s32 sxe_dcb_tc_setup(struct sxe_adapter *adapter, u8 tc)
{
s32 ret = 0;
struct net_device *netdev = adapter->netdev;
struct sxe_hw *hw = &adapter->hw;
LOG_DEBUG_BDF("current dcb state=%x, tc_num=%u, cfg tc_num=%u\n",
!!(adapter->cap & SXE_DCB_ENABLE),
sxe_dcb_tc_get(adapter), tc);
if (tc) {
if (adapter->xdp_prog) {
LOG_MSG_WARN(probe, "dcb is not supported with xdp\n");
sxe_ring_irq_init(adapter);
if (netif_running(netdev))
sxe_open(netdev);
ret = -EINVAL;
goto l_ret;
}
netdev_set_num_tc(netdev, tc);
sxe_prio_tc_map_set(adapter);
sxe_dcb_tc_set(adapter, tc);
adapter->cap |= SXE_DCB_ENABLE;
LOG_DEBUG_BDF("dcb enable, cfg tc_num=%u\n", tc);
} else {
netdev_reset_tc(netdev);
adapter->cap &= ~SXE_DCB_ENABLE;
sxe_dcb_tc_set(adapter, tc);
adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = false;
adapter->dcb_ctxt.cee_cfg.pfc_mode_enable = false;
LOG_DEBUG_BDF("dcb disable, cfg tc_num=%u\n", tc);
}
hw->dma.ops->dcb_rx_up_tc_map_set(hw, tc);
l_ret:
return ret;
}
static u8 sxe_dcbnl_state_get(struct net_device *netdev)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
LOG_DEBUG_BDF("dcb current state=%u\n",
!!(adapter->cap & SXE_DCB_ENABLE));
return (u8)(!!(adapter->cap & SXE_DCB_ENABLE));
}
static u8 sxe_dcbnl_state_set(struct net_device *netdev, u8 state)
{
s32 ret = 1;
struct sxe_adapter *adapter = netdev_priv(netdev);
if (!state == !(adapter->cap & SXE_DCB_ENABLE)) {
LOG_INFO_BDF("dcb current state=%x, set state=%x, no change\n",
!!(adapter->cap & SXE_DCB_ENABLE), state);
ret = 0;
goto l_end;
}
LOG_DEBUG_BDF("dcb current state=%u, set state=%u, setup tc\n",
!!(adapter->cap & SXE_DCB_ENABLE), state);
ret = !!sxe_ring_reassign(adapter,
state ? adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs : 0);
l_end:
return ret;
}
static void sxe_dcbnl_perm_addr_get(struct net_device *netdev, u8 *perm_addr)
{
u32 i;
struct sxe_adapter *adapter = netdev_priv(netdev);
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->mac_filter_ctxt.def_mac_addr[i];
LOG_DEBUG_BDF("perm_addr=%pM\n", perm_addr);
}
static void sxe_dcbnl_tx_pg_tc_cfg_set(struct net_device *netdev, int tc,
u8 prio_type, u8 bwg_id, u8 bwg_pct,
u8 up_map)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt;
LOG_DEBUG_BDF("tx pg tc config, tc=%d, prio=%u, bwg_id=%u,\n"
"\tbwg_pct=%u, up_map=%u\n",
tc, prio_type, bwg_id, bwg_pct, up_map);
if (prio_type != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_TX].prio_type = prio_type;
}
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc].channel[DCB_PATH_TX].bwg_id =
bwg_id;
}
if (bwg_pct != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_TX]
.bwg_percent = bwg_pct;
}
if (up_map != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_TX]
.up_to_tc_bitmap = up_map;
}
}
static void sxe_dcbnl_tx_pg_bwg_cfg_set(struct net_device *netdev, int bwg_id,
u8 bwg_pct)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
LOG_DEBUG_BDF("tx bw config, bwg_id=%d, bwg_pct=%u\n", bwg_id, bwg_pct);
adapter->dcb_ctxt.cee_temp_cfg.bwg_link_percent[DCB_PATH_TX][bwg_id] =
bwg_pct;
}
static void sxe_dcbnl_rx_pg_tc_cfg_set(struct net_device *netdev, int tc,
u8 prio_type, u8 bwg_id, u8 bwg_pct,
u8 up_map)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt;
LOG_DEBUG_BDF("rx pg tc config, tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u,\n"
"\tup_map=%u\n",
tc, prio_type, bwg_id, bwg_pct, up_map);
if (prio_type != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_RX]
.prio_type = prio_type;
}
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc].channel[DCB_PATH_RX].bwg_id =
bwg_id;
}
if (bwg_pct != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_RX]
.bwg_percent = bwg_pct;
}
if (up_map != DCB_ATTR_VALUE_UNDEFINED) {
dcb_ctxt->cee_temp_cfg.tc_config[tc]
.channel[DCB_PATH_RX]
.up_to_tc_bitmap = up_map;
}
}
static void sxe_dcbnl_rx_pg_bwg_cfg_set(struct net_device *netdev, int bwg_id,
u8 bwg_pct)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
LOG_DEBUG_BDF("rx bw config, bwg_id=%d, bwg_pct=%u\n", bwg_id, bwg_pct);
adapter->dcb_ctxt.cee_temp_cfg.bwg_link_percent[DCB_PATH_RX][bwg_id] =
bwg_pct;
}
static void sxe_dcbnl_tx_pg_tc_cfg_get(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bwg_pct,
u8 *up_map)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt;
*prio = dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_TX].prio_type;
*bwg_id = dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_TX].bwg_id;
*bwg_pct =
dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_TX].bwg_percent;
*up_map = dcb_ctxt->cee_cfg.tc_config[tc]
.channel[DCB_PATH_TX]
.up_to_tc_bitmap;
LOG_DEBUG_BDF("get tx pg cfg: tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u,\n"
"\tup_map=%u\n",
tc, *prio, *bwg_id, *bwg_pct, *up_map);
}
static void sxe_dcbnl_tx_pg_bwg_cfg_get(struct net_device *netdev, int bwg_id,
u8 *bwg_pct)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
*bwg_pct =
adapter->dcb_ctxt.cee_cfg.bwg_link_percent[DCB_PATH_TX][bwg_id];
LOG_DEBUG_BDF("get tx bwg cfg: bwg_id=%u, bwg_pct=%d\n", bwg_id,
*bwg_pct);
}
static void sxe_dcbnl_rx_pg_tc_cfg_get(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bwg_pct,
u8 *up_map)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt;
LOG_DEBUG_BDF("get rx pg cfg: tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u,\n"
"\tup_map=%u\n",
tc, *prio, *bwg_id, *bwg_pct, *up_map);
*prio = dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_RX].prio_type;
*bwg_id = dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_RX].bwg_id;
*bwg_pct =
dcb_ctxt->cee_cfg.tc_config[tc].channel[DCB_PATH_RX].bwg_percent;
*up_map = dcb_ctxt->cee_cfg.tc_config[tc]
.channel[DCB_PATH_RX]
.up_to_tc_bitmap;
}
static void sxe_dcbnl_rx_pg_bwg_cfg_get(struct net_device *netdev, int bwg_id,
u8 *bwg_pct)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
*bwg_pct =
adapter->dcb_ctxt.cee_cfg.bwg_link_percent[DCB_PATH_RX][bwg_id];
LOG_DEBUG_BDF("get rx bwg cfg: bwg_id=%d, bwg_pct=%u\n", bwg_id,
*bwg_pct);
}
static void sxe_dcbnl_pfc_cfg_set(struct net_device *netdev, int tc, u8 setting)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
adapter->dcb_ctxt.cee_temp_cfg.tc_config[tc].pfc_type = setting;
if (adapter->dcb_ctxt.cee_temp_cfg.tc_config[tc].pfc_type !=
adapter->dcb_ctxt.cee_cfg.tc_config[tc].pfc_type) {
adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = true;
}
LOG_DEBUG_BDF("set pfc: tc=%d, setting=%u\n", tc, setting);
}
static void sxe_dcbnl_pfc_cfg_get(struct net_device *netdev, int tc,
u8 *setting)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
*setting = adapter->dcb_ctxt.cee_cfg.tc_config[tc].pfc_type;
LOG_DEBUG_BDF("get pfc: priority=%d, setting=%u\n", tc, *setting);
}
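/* Fold the staged settings in cee_temp_cfg into the active cee_cfg and return a bitmap (BIT_PG_TX/BIT_PG_RX/BIT_PFC/BIT_APP_UPCHG) of what changed. */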
static s32 sxe_dcb_cfg_copy(struct sxe_adapter *adapter, int tc_max)
{
u32 i;
u32 changes = 0;
u32 tx = DCB_PATH_TX;
u32 rx = DCB_PATH_RX;
struct sxe_tc_config *src;
struct sxe_tc_config *dst;
struct sxe_dcb_cee_config *scfg = &adapter->dcb_ctxt.cee_temp_cfg;
struct sxe_dcb_cee_config *dcfg = &adapter->dcb_ctxt.cee_cfg;
for (i = 0; i < tc_max; i++) {
src = &scfg->tc_config[i];
dst = &dcfg->tc_config[i];
if (dst->channel[tx].prio_type != src->channel[tx].prio_type) {
dst->channel[tx].prio_type = src->channel[tx].prio_type;
changes |= BIT_PG_TX;
}
if (dst->channel[tx].bwg_id != src->channel[tx].bwg_id) {
dst->channel[tx].bwg_id = src->channel[tx].bwg_id;
changes |= BIT_PG_TX;
}
if (dst->channel[tx].bwg_percent !=
src->channel[tx].bwg_percent) {
dst->channel[tx].bwg_percent =
src->channel[tx].bwg_percent;
changes |= BIT_PG_TX;
}
if (dst->channel[tx].up_to_tc_bitmap !=
src->channel[tx].up_to_tc_bitmap) {
dst->channel[tx].up_to_tc_bitmap =
src->channel[tx].up_to_tc_bitmap;
changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG);
}
if (dst->channel[rx].prio_type != src->channel[rx].prio_type) {
dst->channel[rx].prio_type = src->channel[rx].prio_type;
changes |= BIT_PG_RX;
}
if (dst->channel[rx].bwg_id != src->channel[rx].bwg_id) {
dst->channel[rx].bwg_id = src->channel[rx].bwg_id;
changes |= BIT_PG_RX;
}
if (dst->channel[rx].bwg_percent !=
src->channel[rx].bwg_percent) {
dst->channel[rx].bwg_percent =
src->channel[rx].bwg_percent;
changes |= BIT_PG_RX;
}
if (dst->channel[rx].up_to_tc_bitmap !=
src->channel[rx].up_to_tc_bitmap) {
dst->channel[rx].up_to_tc_bitmap =
src->channel[rx].up_to_tc_bitmap;
changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG);
}
}
for (i = 0; i < SXE_DCB_TC_MAX; i++) {
if (dcfg->bwg_link_percent[tx][i] !=
scfg->bwg_link_percent[tx][i]) {
dcfg->bwg_link_percent[tx][i] =
scfg->bwg_link_percent[tx][i];
changes |= BIT_PG_TX;
}
if (dcfg->bwg_link_percent[rx][i] !=
scfg->bwg_link_percent[rx][i]) {
dcfg->bwg_link_percent[rx][i] =
scfg->bwg_link_percent[rx][i];
changes |= BIT_PG_RX;
}
}
for (i = 0; i < SXE_DCB_TC_MAX; i++) {
if (dcfg->tc_config[i].pfc_type !=
scfg->tc_config[i].pfc_type) {
dcfg->tc_config[i].pfc_type =
scfg->tc_config[i].pfc_type;
changes |= BIT_PFC;
}
}
if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) {
dcfg->pfc_mode_enable = scfg->pfc_mode_enable;
changes |= BIT_PFC;
}
LOG_DEBUG_BDF("cee cfg cpy, change cfg=%x\n", changes);
return changes;
}
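/* Apply pending CEE changes to hardware: recompute per-TC credits and program ETS when PG settings changed; program PFC, or fall back to link flow control, when PFC settings changed. */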
static u8 sxe_dcbnl_cee_configure(struct net_device *netdev)
{
u32 i;
u8 pfc_en;
u32 max_frame;
u8 ret = DCB_NO_HW_CHG;
u8 prio_tc[MAX_USER_PRIORITY];
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_hw *hw = &adapter->hw;
struct sxe_dcb_cee_config *dcb_cfg = &adapter->dcb_ctxt.cee_cfg;
LOG_DEBUG_BDF("dcbnl cfg setall\n");
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) {
LOG_DEBUG_BDF("not cee mode, settings are not supported\n");
ret = DCB_NO_HW_CHG;
goto l_end;
}
adapter->dcb_ctxt.cee_cfg_bitmap |=
sxe_dcb_cfg_copy(adapter, MAX_TRAFFIC_CLASS);
if (!adapter->dcb_ctxt.cee_cfg_bitmap) {
LOG_DEBUG_BDF("cfg not change\n");
ret = DCB_NO_HW_CHG;
goto l_end;
}
if (adapter->dcb_ctxt.cee_cfg_bitmap & (BIT_PG_TX | BIT_PG_RX)) {
max_frame = adapter->netdev->mtu + SXE_ETH_DEAD_LOAD;
sxe_dcb_cee_tc_credits_calculate(hw, dcb_cfg, max_frame,
DCB_PATH_TX);
sxe_dcb_cee_tc_credits_calculate(hw, dcb_cfg, max_frame,
DCB_PATH_RX);
sxe_dcb_cee_refill_parse(dcb_cfg, DCB_PATH_TX, refill);
sxe_dcb_cee_max_credits_parse(dcb_cfg, max);
sxe_dcb_cee_bwgid_parse(dcb_cfg, DCB_PATH_TX, bwg_id);
sxe_dcb_cee_prio_parse(dcb_cfg, DCB_PATH_TX, prio_type);
sxe_dcb_cee_up2tc_map_parse(dcb_cfg, DCB_PATH_TX, prio_tc);
sxe_dcb_hw_ets_configure(hw, refill, max, bwg_id, prio_type,
prio_tc);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
ret = DCB_HW_CHG_RST;
}
if (adapter->dcb_ctxt.cee_cfg_bitmap & BIT_PFC) {
if (dcb_cfg->pfc_mode_enable) {
sxe_dcb_cee_up2tc_map_parse(dcb_cfg, DCB_PATH_TX,
prio_tc);
sxe_dcb_cee_pfc_parse(dcb_cfg, &pfc_en);
sxe_dcb_hw_pfc_configure(hw, pfc_en, prio_tc);
} else {
sxe_fc_enable(adapter);
}
sxe_rx_drop_mode_set(adapter);
ret = DCB_HW_CHG;
}
adapter->dcb_ctxt.cee_cfg_bitmap = 0x0;
l_end:
return ret;
}
static u8 sxe_dcbnl_all_set(struct net_device *netdev)
{
return sxe_dcbnl_cee_configure(netdev);
}
static u8 sxe_dcbnl_cap_get(struct net_device *netdev, int capid, u8 *cap)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
case DCB_CAP_ATTR_PFC:
case DCB_CAP_ATTR_GSP:
*cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
case DCB_CAP_ATTR_BCN:
*cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
case DCB_CAP_ATTR_PFC_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_DCBX:
*cap = adapter->dcb_ctxt.dcbx_cap;
break;
default:
*cap = false;
break;
}
LOG_DEBUG_BDF("get dcb cap=%x\n", *cap);
return 0;
}
static int sxe_dcbnl_num_tcs_get(struct net_device *netdev, int tcid, u8 *num)
{
int ret = 0;
struct sxe_adapter *adapter = netdev_priv(netdev);
if (adapter->cap & SXE_DCB_ENABLE) {
switch (tcid) {
case DCB_NUMTCS_ATTR_PG:
*num = adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs;
break;
case DCB_NUMTCS_ATTR_PFC:
*num = adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs;
break;
default:
LOG_ERROR_BDF("feature dont support=%x\n", tcid);
ret = -EINVAL;
}
} else {
LOG_ERROR_BDF("dcb disable\n");
ret = -EINVAL;
}
LOG_DEBUG_BDF("tcid=%x, tcs=%u\n", tcid, *num);
return ret;
}
static int sxe_dcbnl_num_tcs_set(struct net_device *netdev, int tcid, u8 num)
{
LOG_WARN("configuring tc is not supported\n");
return -EINVAL;
}
static u8 sxe_dcbnl_pfc_state_get(struct net_device *netdev)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
LOG_DEBUG_BDF("pfc state=%x\n",
adapter->dcb_ctxt.cee_cfg.pfc_mode_enable);
return adapter->dcb_ctxt.cee_cfg.pfc_mode_enable;
}
static void sxe_dcbnl_pfc_state_set(struct net_device *netdev, u8 state)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
LOG_DEBUG_BDF("current pfc state=%x, set state=%x\n",
adapter->dcb_ctxt.cee_cfg.pfc_mode_enable, state);
adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = state;
}
#ifdef DCBNL_OPS_GETAPP_RETURN_U8
static u8 sxe_dcbnl_app_get(struct net_device *netdev, u8 idtype, u16 id)
#else
static int sxe_dcbnl_app_get(struct net_device *netdev, u8 idtype, u16 id)
#endif
{
int ret;
struct sxe_adapter *adapter = netdev_priv(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
};
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) {
LOG_DEBUG_BDF("not cee mode, not supported get\n");
#ifdef DCBNL_OPS_GETAPP_RETURN_U8
ret = 0;
#else
ret = -EINVAL;
#endif
goto l_end;
}
ret = dcb_getapp(netdev, &app);
LOG_DEBUG_BDF("idtype=%x, id=%x, app=%x\n", idtype, id, ret);
l_end:
return ret;
}
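/* Quiesce the netdev and rebuild the ring/IRQ layout, serialized against concurrent resets via the SXE_RESETTING bit. */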
static void sxe_dcbnl_devreset(struct net_device *dev)
{
s32 ret;
struct sxe_adapter *adapter = netdev_priv(dev);
while (test_and_set_bit(SXE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (netif_running(dev))
dev->netdev_ops->ndo_stop(dev);
sxe_ring_irq_exit(adapter);
ret = sxe_ring_irq_init(adapter);
if (ret) {
LOG_ERROR_BDF("interrupt ring assign scheme init failed, err=%d\n",
ret);
goto l_end;
}
if (netif_running(dev))
dev->netdev_ops->ndo_open(dev);
clear_bit(SXE_RESETTING, &adapter->state);
LOG_DEBUG_BDF("dcbnl reset finish\n");
l_end:
;
}
static int sxe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
struct sxe_adapter *adapter = netdev_priv(dev);
struct ieee_ets *hw_ets = adapter->dcb_ctxt.ieee_ets;
ets->ets_cap = adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs;
if (!hw_ets) {
LOG_DEBUG_BDF("dont have ets cfg\n");
goto l_end;
}
ets->cbs = hw_ets->cbs;
memcpy(ets->tc_tx_bw, hw_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, hw_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
memcpy(ets->tc_tsa, hw_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, hw_ets->prio_tc, sizeof(ets->prio_tc));
LOG_DEBUG_BDF("get ets cfg ok\n");
l_end:
return 0;
}
static int sxe_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
int ret;
u32 i;
u8 max_tc = 0;
u8 map_chg = 0;
u32 max_frame = dev->mtu + SXE_ETH_DEAD_LOAD;
struct sxe_adapter *adapter = netdev_priv(dev);
struct sxe_hw *hw = &adapter->hw;
LOG_DEBUG_BDF("set ets\n");
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
LOG_ERROR_BDF("not ieee, dont support\n");
ret = -EINVAL;
goto l_end;
}
if (!adapter->dcb_ctxt.ieee_ets) {
adapter->dcb_ctxt.ieee_ets =
kmalloc(sizeof(*adapter->dcb_ctxt.ieee_ets), GFP_KERNEL);
if (!adapter->dcb_ctxt.ieee_ets) {
LOG_ERROR_BDF("kmalloc failed\n");
ret = -ENOMEM;
goto l_end;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
adapter->dcb_ctxt.ieee_ets->prio_tc[i] =
IEEE_8021QAZ_MAX_TCS;
hw->dma.ops->dcb_rx_up_tc_map_get(hw,
adapter->dcb_ctxt.ieee_ets->prio_tc);
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
if (ets->prio_tc[i] != adapter->dcb_ctxt.ieee_ets->prio_tc[i])
map_chg = 1;
}
memcpy(adapter->dcb_ctxt.ieee_ets, ets,
sizeof(*adapter->dcb_ctxt.ieee_ets));
if (max_tc)
max_tc++;
if (max_tc > adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs) {
LOG_ERROR_BDF("set tc=%u > max tc=%u\n", max_tc,
adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs);
ret = -EINVAL;
goto l_end;
}
if (max_tc != adapter->dcb_ctxt.hw_tcs) {
ret = sxe_ring_reassign(adapter, max_tc);
if (ret) {
LOG_ERROR_BDF("ring reassign failed, ret=%d\n", ret);
goto l_end;
}
} else if (map_chg) {
sxe_dcbnl_devreset(dev);
}
ret = sxe_dcb_hw_ieee_ets_configure(&adapter->hw, ets, max_frame);
if (ret) {
LOG_ERROR_BDF("ets config failed, max_frame=%u, ret=%u\n",
max_frame, ret);
}
l_end:
return ret;
}
static int sxe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct sxe_adapter *adapter = netdev_priv(dev);
struct ieee_pfc *hw_pfc = adapter->dcb_ctxt.ieee_pfc;
pfc->pfc_cap = adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs;
if (!hw_pfc) {
LOG_DEBUG_BDF("dont have pfc cfg\n");
goto l_end;
}
pfc->pfc_en = hw_pfc->pfc_en;
pfc->mbc = hw_pfc->mbc;
pfc->delay = hw_pfc->delay;
LOG_DEBUG_BDF("get pfc cfg ok\n");
l_end:
return 0;
}
static int sxe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
int ret = 0;
u8 *prio_tc;
struct sxe_adapter *adapter = netdev_priv(dev);
struct sxe_hw *hw = &adapter->hw;
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
LOG_ERROR_BDF("not ieee, dont support\n");
ret = -EINVAL;
goto l_end;
}
if (!adapter->dcb_ctxt.ieee_pfc) {
adapter->dcb_ctxt.ieee_pfc =
kmalloc(sizeof(*adapter->dcb_ctxt.ieee_pfc), GFP_KERNEL);
if (!adapter->dcb_ctxt.ieee_pfc) {
LOG_ERROR_BDF("kmalloc failed\n");
ret = -ENOMEM;
goto l_end;
}
}
prio_tc = adapter->dcb_ctxt.ieee_ets->prio_tc;
memcpy(adapter->dcb_ctxt.ieee_pfc, pfc,
sizeof(*adapter->dcb_ctxt.ieee_pfc));
if (pfc->pfc_en)
sxe_dcb_hw_pfc_configure(hw, pfc->pfc_en, prio_tc);
else
sxe_fc_enable(adapter);
sxe_rx_drop_mode_set(adapter);
l_end:
return ret;
}
static int sxe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
int ret;
u32 vf;
struct sxe_vf_info *vfinfo;
struct sxe_adapter *adapter = netdev_priv(dev);
struct sxe_hw *hw = &adapter->hw;
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
LOG_ERROR_BDF("not ieee, dont support\n");
ret = -EINVAL;
goto l_end;
}
ret = dcb_ieee_setapp(dev, app);
if (ret) {
LOG_ERROR_BDF("set app failed, ret=%d\n", ret);
goto l_end;
}
if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == 0) {
adapter->dcb_ctxt.default_up = app->priority;
for (vf = 0; vf < adapter->vt_ctxt.num_vfs; vf++) {
vfinfo = &adapter->vt_ctxt.vf_info[vf];
if (!vfinfo->pf_qos) {
hw->dma.ops->tx_vlan_tag_set(hw,
vfinfo->pf_vlan, app->priority, vf);
}
}
}
LOG_DEBUG_BDF("set app ok\n");
l_end:
return ret;
}
static int sxe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
int ret;
u32 vf;
u16 qos;
unsigned long app_mask;
struct sxe_vf_info *vfinfo;
struct sxe_adapter *adapter = netdev_priv(dev);
struct sxe_hw *hw = &adapter->hw;
if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
LOG_ERROR_BDF("not ieee, dont support\n");
ret = -EINVAL;
goto l_end;
}
ret = dcb_ieee_delapp(dev, app);
if (ret) {
LOG_ERROR_BDF("del app failed, ret=%d\n", ret);
goto l_end;
}
if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == 0 &&
adapter->dcb_ctxt.default_up == app->priority) {
app_mask = dcb_ieee_getapp_mask(dev, app);
qos = app_mask ? find_first_bit(&app_mask, 8) : 0;
adapter->dcb_ctxt.default_up = qos;
for (vf = 0; vf < adapter->vt_ctxt.num_vfs; vf++) {
vfinfo = &adapter->vt_ctxt.vf_info[vf];
if (!vfinfo->pf_qos)
hw->dma.ops->tx_vlan_tag_set(hw,
vfinfo->pf_vlan, qos, vf);
}
}
LOG_DEBUG_BDF("del app ok\n");
l_end:
return ret;
}
static u8 sxe_dcbnl_dcbx_get(struct net_device *dev)
{
struct sxe_adapter *adapter = netdev_priv(dev);
LOG_DEBUG_BDF("dcbx cap=%x\n", adapter->dcb_ctxt.dcbx_cap);
return adapter->dcb_ctxt.dcbx_cap;
}
static u8 sxe_dcbnl_dcbx_set(struct net_device *dev, u8 mode)
{
u8 ret = 0;
s32 err = 0;
struct ieee_ets ets = { 0 };
struct ieee_pfc pfc = { 0 };
struct sxe_adapter *adapter = netdev_priv(dev);
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST)) {
LOG_ERROR_BDF("dont support mode=%x\n", mode);
ret = 1;
goto l_end;
}
if (mode == adapter->dcb_ctxt.dcbx_cap)
goto l_end;
adapter->dcb_ctxt.dcbx_cap = mode;
ets.ets_cap = 8;
pfc.pfc_cap = 8;
if (mode & DCB_CAP_DCBX_VER_IEEE) {
sxe_dcbnl_ieee_setets(dev, &ets);
sxe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
adapter->dcb_ctxt.cee_cfg_bitmap |= mask;
sxe_dcbnl_cee_configure(dev);
} else {
sxe_dcbnl_ieee_setets(dev, &ets);
sxe_dcbnl_ieee_setpfc(dev, &pfc);
err = sxe_ring_reassign(adapter, 0);
if (err) {
LOG_ERROR_BDF("ring reassign failed, err=%d\n", err);
ret = 1;
}
}
l_end:
return ret;
}
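/* dcbnl entry points: IEEE 802.1Qaz ets/pfc/app hooks plus the legacy CEE get/set interface. */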
const struct dcbnl_rtnl_ops sxe_dcbnl_ops = {
.ieee_getets = sxe_dcbnl_ieee_getets,
.ieee_setets = sxe_dcbnl_ieee_setets,
.ieee_getpfc = sxe_dcbnl_ieee_getpfc,
.ieee_setpfc = sxe_dcbnl_ieee_setpfc,
.ieee_setapp = sxe_dcbnl_ieee_setapp,
.ieee_delapp = sxe_dcbnl_ieee_delapp,
.getstate = sxe_dcbnl_state_get,
.setstate = sxe_dcbnl_state_set,
.getpermhwaddr = sxe_dcbnl_perm_addr_get,
.setpgtccfgtx = sxe_dcbnl_tx_pg_tc_cfg_set,
.setpgbwgcfgtx = sxe_dcbnl_tx_pg_bwg_cfg_set,
.setpgtccfgrx = sxe_dcbnl_rx_pg_tc_cfg_set,
.setpgbwgcfgrx = sxe_dcbnl_rx_pg_bwg_cfg_set,
.getpgtccfgtx = sxe_dcbnl_tx_pg_tc_cfg_get,
.getpgbwgcfgtx = sxe_dcbnl_tx_pg_bwg_cfg_get,
.getpgtccfgrx = sxe_dcbnl_rx_pg_tc_cfg_get,
.getpgbwgcfgrx = sxe_dcbnl_rx_pg_bwg_cfg_get,
.setpfccfg = sxe_dcbnl_pfc_cfg_set,
.getpfccfg = sxe_dcbnl_pfc_cfg_get,
.setall = sxe_dcbnl_all_set,
.getcap = sxe_dcbnl_cap_get,
.getnumtcs = sxe_dcbnl_num_tcs_get,
.setnumtcs = sxe_dcbnl_num_tcs_set,
.getpfcstate = sxe_dcbnl_pfc_state_get,
.setpfcstate = sxe_dcbnl_pfc_state_set,
.getapp = sxe_dcbnl_app_get,
.getdcbx = sxe_dcbnl_dcbx_get,
.setdcbx = sxe_dcbnl_dcbx_set,
};
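/* These hooks are driven from user space, e.g. by open-lldp ("lldptool -T -i <ifname> -V ETS-CFG ..." for the IEEE path, dcbtool for CEE); commands are illustrative. */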
#endif
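/* Enable per-ring rx drop when SR-IOV is active, or when multiple rx rings run without tx pause/PFC, to avoid head-of-line blocking; otherwise keep packets and rely on flow control. */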
void sxe_rx_drop_mode_set(struct sxe_adapter *adapter)
{
u32 i;
struct sxe_hw *hw = &adapter->hw;
bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable;
u32 current_mode = hw->mac.ops->fc_current_mode_get(hw);
#ifdef SXE_DCB_CONFIGURE
if (adapter->dcb_ctxt.ieee_pfc)
pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en);
#endif
if (adapter->vt_ctxt.num_vfs ||
(adapter->rx_ring_ctxt.num > 1 &&
!(current_mode & SXE_FC_TX_PAUSE) && !pfc_en)) {
for (i = 0; i < adapter->rx_ring_ctxt.num; i++) {
hw->dma.ops->rx_drop_switch(hw,
adapter->rx_ring_ctxt.ring[i]->reg_idx, true);
}
} else {
for (i = 0; i < adapter->rx_ring_ctxt.num; i++) {
hw->dma.ops->rx_drop_switch(hw,
adapter->rx_ring_ctxt.ring[i]->reg_idx, false);
}
}
}

@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_debug.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/highmem.h>
#include "sxe_debug.h"
#define SKB_DESCRIPTION_LEN 256
void sxe_dump_skb(struct sk_buff *skb)
{
#ifndef SXE_DRIVER_RELEASE
u32 len = skb->len;
u32 data_len = skb->data_len;
#endif
s8 desc[SKB_DESCRIPTION_LEN] = {};
snprintf(desc, SKB_DESCRIPTION_LEN, "skb addr:%p %s", skb,
"linear region");
#ifndef SXE_DRIVER_RELEASE
sxe_log_binary(__FILE__, __func__, __LINE__, (u8 *)skb->data,
(u64)skb, min_t(u32, len - data_len, 256), desc);
#endif
}

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_debug.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_DEBUG_H__
#define __SXE_DEBUG_H__
#include <linux/skbuff.h>
#include "sxe_log.h"
void sxe_dump_skb(struct sk_buff *skb);
#if defined SXE_DRIVER_RELEASE
#define SKB_DUMP(skb)
#else
#define SKB_DUMP(skb) sxe_dump_skb(skb)
#endif
#endif

@ -0,0 +1,473 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_debugfs.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include "sxe.h"
#include "sxe_netdev.h"
#include "sxe_version.h"
#include "sxe_phy.h"
#define SXE_HW_STATS_LEN ARRAY_SIZE(hw_stats)
struct sxe_debugfs_hw_stats {
const char *stat_string;
int sizeof_stat;
int stat_offset;
};
static const struct sxe_debugfs_hw_stats hw_stats[] = {
{ "rx_good_pkts", sizeof(((struct sxe_adapter *)0)->stats.hw.gprc),
offsetof(struct sxe_adapter, stats.hw.gprc)},
{ "tx_good_pkts", sizeof(((struct sxe_adapter *)0)->stats.hw.gptc),
offsetof(struct sxe_adapter, stats.hw.gptc)},
{ "rx_good_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.gorc),
offsetof(struct sxe_adapter, stats.hw.gorc)},
{ "tx_good_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.gotc),
offsetof(struct sxe_adapter, stats.hw.gotc)},
{ "rx_broadcast", sizeof(((struct sxe_adapter *)0)->stats.hw.bprc),
offsetof(struct sxe_adapter, stats.hw.bprc)},
{ "tx_broadcast", sizeof(((struct sxe_adapter *)0)->stats.hw.bptc),
offsetof(struct sxe_adapter, stats.hw.bptc)},
{ "rx_multicast", sizeof(((struct sxe_adapter *)0)->stats.hw.mprc),
offsetof(struct sxe_adapter, stats.hw.mprc)},
{ "tx_multicast", sizeof(((struct sxe_adapter *)0)->stats.hw.mptc),
offsetof(struct sxe_adapter, stats.hw.mptc)},
{ "fnav_match", sizeof(((struct sxe_adapter *)0)->stats.hw.fnavmatch),
offsetof(struct sxe_adapter, stats.hw.fnavmatch)},
{ "fnav_miss", sizeof(((struct sxe_adapter *)0)->stats.hw.fnavmiss),
offsetof(struct sxe_adapter, stats.hw.fnavmiss)},
{ "rx_64_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.prc64),
offsetof(struct sxe_adapter, stats.hw.prc64)},
{ "rx_65~127_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.prc127),
offsetof(struct sxe_adapter, stats.hw.prc127)},
{ "rx_128~255_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.prc255),
offsetof(struct sxe_adapter, stats.hw.prc255)},
{ "rx_256~511_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.prc511),
offsetof(struct sxe_adapter, stats.hw.prc511)},
{ "rx_512~1023_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.prc1023),
offsetof(struct sxe_adapter, stats.hw.prc1023)},
{ "rx_1024~1522_bytes",
sizeof(((struct sxe_adapter *)0)->stats.hw.prc1522),
offsetof(struct sxe_adapter, stats.hw.prc1522)},
{ "tx_64_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.ptc64),
offsetof(struct sxe_adapter, stats.hw.ptc64)},
{ "tx_65~127_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.ptc127),
offsetof(struct sxe_adapter, stats.hw.ptc127)},
{ "tx_128~255_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.ptc255),
offsetof(struct sxe_adapter, stats.hw.ptc255)},
{ "tx_256~511_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.ptc511),
offsetof(struct sxe_adapter, stats.hw.ptc511)},
{ "tx_512~1023_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.ptc1023),
offsetof(struct sxe_adapter, stats.hw.ptc1023)},
{ "tx_1024~1522_bytes",
sizeof(((struct sxe_adapter *)0)->stats.hw.ptc1522),
offsetof(struct sxe_adapter, stats.hw.ptc1522)},
{ "rx_total_pkts", sizeof(((struct sxe_adapter *)0)->stats.hw.tpr),
offsetof(struct sxe_adapter, stats.hw.tpr)},
{ "tx_total_pkts", sizeof(((struct sxe_adapter *)0)->stats.hw.tpt),
offsetof(struct sxe_adapter, stats.hw.tpt)},
{ "rx_total_bytes", sizeof(((struct sxe_adapter *)0)->stats.hw.tor),
offsetof(struct sxe_adapter, stats.hw.tor)},
{ "rx_long_length_errors", sizeof(((struct sxe_adapter *)0)->stats.hw.roc),
offsetof(struct sxe_adapter, stats.hw.roc)},
{ "rx_short_length_errors",
sizeof(((struct sxe_adapter *)0)->stats.hw.ruc),
offsetof(struct sxe_adapter, stats.hw.ruc)},
{ "rx_short_length_with_bad_crc_errors",
sizeof(((struct sxe_adapter *)0)->stats.hw.rfc),
offsetof(struct sxe_adapter, stats.hw.rfc)},
{ "rx_crc_error", sizeof(((struct sxe_adapter *)0)->stats.hw.crcerrs),
offsetof(struct sxe_adapter, stats.hw.crcerrs)},
{ "rx_error_byte", sizeof(((struct sxe_adapter *)0)->stats.hw.errbc),
offsetof(struct sxe_adapter, stats.hw.errbc)},
{ "rx_length_errors", sizeof(((struct sxe_adapter *)0)->stats.hw.rlec),
offsetof(struct sxe_adapter, stats.hw.rlec)},
{ "rx_jabber_errors", sizeof(((struct sxe_adapter *)0)->stats.hw.rjc),
offsetof(struct sxe_adapter, stats.hw.rjc)},
};
static struct dentry *sxe_debugfs_root;
static s8 sxe_debugfs_reg_ops_buf[256] = "";
static ssize_t sxe_debugfs_common_ops_read(struct file *filp,
char __user *buffer, size_t count,
loff_t *ppos, char *debugfs_buf)
{
s8 *buf;
ssize_t ret;
struct sxe_adapter *adapter = filp->private_data;
if (*ppos != 0) {
ret = 0;
goto l_end;
}
buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->netdev->name,
debugfs_buf);
if (!buf) {
ret = -ENOMEM;
goto l_end;
}
if (count < strlen(buf)) {
ret = -ENOSPC;
goto l_free;
}
ret = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
l_free:
kfree(buf);
l_end:
return ret;
}
static ssize_t sxe_debugfs_reg_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
return sxe_debugfs_common_ops_read(filp, buffer, count, ppos,
sxe_debugfs_reg_ops_buf);
}
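/* Accept "read <reg>" or "write <reg> <value>" with hex offsets into BAR0; results are reported through the kernel log. */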
static ssize_t sxe_debugfs_reg_ops_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
ssize_t ret;
s32 cnt;
u32 reg, value;
struct sxe_adapter *adapter = filp->private_data;
struct sxe_hw *hw = &adapter->hw;
if (*ppos != 0) {
ret = 0;
goto l_end;
}
if (count >= sizeof(sxe_debugfs_reg_ops_buf)) {
ret = -ENOSPC;
goto l_end;
}
ret = simple_write_to_buffer(sxe_debugfs_reg_ops_buf,
sizeof(sxe_debugfs_reg_ops_buf) - 1, ppos,
buffer, count);
if (ret < 0)
goto l_end;
sxe_debugfs_reg_ops_buf[ret] = '\0';
if (strncmp(sxe_debugfs_reg_ops_buf, "write", 5) == 0) {
cnt = sscanf(&sxe_debugfs_reg_ops_buf[5], "%x %x", &reg,
&value);
if (cnt != 2) {
LOG_DEV_INFO("write <reg> <value>\n");
ret = count;
goto l_end;
}
if (reg >= pci_resource_len(adapter->pdev, 0)) {
LOG_DEV_INFO("write ops : reg addr err,\n"
"\taddr[%x]>bar0 max addr[0x100000]",
reg);
ret = -EINVAL;
goto l_end;
}
hw->setup.ops->reg_write(hw, reg, value);
value = hw->setup.ops->reg_read(hw, reg);
LOG_DEV_INFO("write: 0x%08x = 0x%08x\n", reg, value);
} else if (strncmp(sxe_debugfs_reg_ops_buf, "read", 4) == 0) {
cnt = sscanf(&sxe_debugfs_reg_ops_buf[4], "%x", &reg);
if (cnt != 1) {
LOG_DEV_INFO("read <reg>\n");
ret = count;
goto l_end;
}
if (reg >= pci_resource_len(adapter->pdev, 0)) {
LOG_DEV_INFO("read ops : reg addr err,\n"
"\taddr[%x]>bar0 max addr[0x100000]",
reg);
ret = -EINVAL;
goto l_end;
}
value = hw->setup.ops->reg_read(hw, reg);
LOG_DEV_INFO("read 0x%08x = 0x%08x\n", reg, value);
} else {
LOG_DEV_INFO("unknown command %s\n", sxe_debugfs_reg_ops_buf);
LOG_DEV_INFO("available commands:\n");
LOG_DEV_INFO(" read <reg>\n");
LOG_DEV_INFO(" write <reg> <value>\n");
}
ret = count;
l_end:
return ret;
}
static const struct file_operations sxe_debugfs_reg_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = sxe_debugfs_reg_ops_read,
.write = sxe_debugfs_reg_ops_write,
};
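/* Usage sketch (path assumes SXE_DRV_NAME is "sxe"):
 * echo "read 0x0" > /sys/kernel/debug/sxe/<pci-bdf>/reg_ops
 * then read the result from the kernel log.
 */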
static s8 debugfs_netdev_buf[256] = "";
static ssize_t sxe_debugfs_netdev_ops_read(struct file *filp,
char __user *buffer, size_t count,
loff_t *ppos)
{
return sxe_debugfs_common_ops_read(filp, buffer, count, ppos,
debugfs_netdev_buf);
}
static ssize_t sxe_debugfs_netdev_ops_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
ssize_t ret;
struct sxe_adapter *adapter = filp->private_data;
if (*ppos != 0) {
ret = 0;
goto l_end;
}
if (count >= sizeof(debugfs_netdev_buf)) {
ret = -ENOSPC;
goto l_end;
}
ret = simple_write_to_buffer(debugfs_netdev_buf,
sizeof(debugfs_netdev_buf) - 1, ppos,
buffer, count);
if (ret < 0)
goto l_end;
debugfs_netdev_buf[ret] = '\0';
if (!strncmp(debugfs_netdev_buf, "tx_timeout", 10)) {
#ifdef HAVE_TIMEOUT_TXQUEUE_IDX
adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
UINT_MAX);
#else
adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
#endif
LOG_DEV_INFO("tx_timeout called\n");
} else {
LOG_DEV_INFO("unknown command: %s\n", debugfs_netdev_buf);
LOG_DEV_INFO("available commands:\n");
LOG_DEV_INFO(" tx_timeout\n");
}
ret = count;
l_end:
return ret;
}
static const struct file_operations sxe_debugfs_netdev_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = sxe_debugfs_netdev_ops_read,
.write = sxe_debugfs_netdev_ops_write,
};
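/* Refresh the adapter statistics and dump them to the kernel log; nothing is returned through the read buffer itself. */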
static ssize_t sxe_debugfs_hw_stats_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
u32 i;
u64 value;
s8 *offset;
struct sxe_debugfs_hw_stats *hw_stats_t;
struct sxe_adapter *adapter = filp->private_data;
hw_stats_t =
kzalloc(sizeof(struct sxe_debugfs_hw_stats) * SXE_HW_STATS_LEN,
GFP_ATOMIC);
if (!hw_stats_t)
return -ENOMEM;
stats_lock(adapter);
sxe_stats_update(adapter);
memcpy(hw_stats_t, hw_stats,
sizeof(struct sxe_debugfs_hw_stats) * SXE_HW_STATS_LEN);
stats_unlock(adapter);
for (i = 0; i < SXE_HW_STATS_LEN; i++) {
offset = (s8 *)adapter + hw_stats_t[i].stat_offset;
value = (hw_stats_t[i].sizeof_stat == sizeof(u64)) ?
*(u64 *)offset :
*(u32 *)offset;
LOG_DEV_INFO("%s: %llu\n", hw_stats_t[i].stat_string, value);
}
kfree(hw_stats_t);
return 0;
}
static const struct file_operations sxe_debugfs_hw_stats_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = sxe_debugfs_hw_stats_read,
};
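/* Identify the SFP module from its EEPROM: classify the media type and warn when the vendor part number is not a validated one. */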
static ssize_t sxe_debugfs_sfp_info_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct sxe_adapter *adapter = filp->private_data;
s32 ret;
enum sxe_sfp_type sfp_type;
u8 sfp_comp_code[SXE_SFP_COMP_CODE_SIZE];
u8 sfp_vendor_pn[SXE_SFP_VENDOR_PN_SIZE + 1] = { 0 };
LOG_INFO_BDF("sfp identify start\n");
ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_BASE_ADDR,
SXE_SFP_COMP_CODE_SIZE, sfp_comp_code);
if (ret) {
sfp_type = SXE_SFP_TYPE_NOT_PRESENT;
LOG_DEV_ERR("get sfp identifier failed, ret=%d\n", ret);
goto l_end;
}
LOG_DEV_INFO("sfp identifier=%x, cable_technology=%x,\n"
"\t10GB_code=%x, 1GB_code=%x\n",
sfp_comp_code[SXE_SFF_IDENTIFIER],
sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY],
sfp_comp_code[SXE_SFF_10GBE_COMP_CODES],
sfp_comp_code[SXE_SFF_1GBE_COMP_CODES]);
if (sfp_comp_code[SXE_SFF_IDENTIFIER] != SXE_SFF_IDENTIFIER_SFP) {
LOG_DEV_ERR("module type is not sfp/sfp+, offset=%d, type=%x\n",
SXE_SFF_IDENTIFIER,
sfp_comp_code[SXE_SFF_IDENTIFIER]);
sfp_type = SXE_SFP_TYPE_UNKNOWN;
goto l_end;
}
if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] &
SXE_SFF_DA_PASSIVE_CABLE) {
sfp_type = SXE_SFP_TYPE_DA_CU;
} else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
(SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) {
sfp_type = SXE_SFP_TYPE_SRLR;
} else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
SXE_SFF_1GBASET_CAPABLE) {
sfp_type = SXE_SFP_TYPE_1G_CU;
} else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
SXE_SFF_1GBASESX_CAPABLE) ||
(sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
SXE_SFF_1GBASELX_CAPABLE)) {
sfp_type = SXE_SFP_TYPE_1G_SXLX;
} else {
sfp_type = SXE_SFP_TYPE_UNKNOWN;
}
LOG_DEV_INFO("identify sfp, sfp_type=%d\n", sfp_type);
if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
SXE_SFF_1GBASESX_CAPABLE) &&
(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
SXE_SFF_10GBASESR_CAPABLE)) ||
((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
SXE_SFF_1GBASELX_CAPABLE) &&
(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
SXE_SFF_10GBASELR_CAPABLE))) {
LOG_DEV_INFO("identify sfp, sfp is multispeed\n");
} else {
LOG_DEV_INFO("identify sfp, sfp is not multispeed\n");
}
ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_VENDOR_PN,
SXE_SFP_VENDOR_PN_SIZE, sfp_vendor_pn);
if (ret) {
LOG_DEV_ERR("get sfp vendor pn failed, ret=%d\n", ret);
goto l_end;
}
LOG_DEV_INFO("sfp vendor pn: %s\n", sfp_vendor_pn);
ret = sxe_sfp_vendor_pn_cmp(sfp_vendor_pn);
if (!ret) {
LOG_DEV_WARN("an supported SFP module type was detected\n");
goto l_end;
}
LOG_DEV_WARN("an unsupported SFP module type was detected\n");
LOG_DEV_WARN("refer to the sxe ethernet adapters and devices user\n"
"\tguide for a list of supported modules\n");
l_end:
return 0;
}
static const struct file_operations sxe_debugfs_sfp_info_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = sxe_debugfs_sfp_info_read,
};
void sxe_debugfs_entries_init(struct sxe_adapter *adapter)
{
struct dentry *dir;
const char *name = pci_name(adapter->pdev);
adapter->debugfs_entries = debugfs_create_dir(name, sxe_debugfs_root);
dir = debugfs_create_file("reg_ops", 0600, adapter->debugfs_entries,
adapter, &sxe_debugfs_reg_ops_fops);
if (!dir || dir == ERR_PTR(-ENODEV))
LOG_INFO_BDF("debugfs:reg_ops file create failed\n");
dir = debugfs_create_file("netdev_ops", 0600, adapter->debugfs_entries,
adapter, &sxe_debugfs_netdev_ops_fops);
if (!dir || dir == ERR_PTR(-ENODEV))
LOG_INFO_BDF("debugfs:netdev_ops file create failed\n");
dir = debugfs_create_file("hw_stats", 0400, adapter->debugfs_entries,
adapter, &sxe_debugfs_hw_stats_fops);
if (!dir || dir == ERR_PTR(-ENODEV))
LOG_INFO_BDF("debugfs:hw_stats file create failed\n");
dir = debugfs_create_file("sfp_info", 0400, adapter->debugfs_entries,
adapter, &sxe_debugfs_sfp_info_fops);
if (!dir || dir == ERR_PTR(-ENODEV))
LOG_INFO_BDF("debugfs:sfp_info file create failed\n");
}
void sxe_debugfs_entries_exit(struct sxe_adapter *adapter)
{
debugfs_remove_recursive(adapter->debugfs_entries);
adapter->debugfs_entries = NULL;
}
void sxe_debugfs_init(void)
{
sxe_debugfs_root = debugfs_create_dir(SXE_DRV_NAME, NULL);
}
void sxe_debugfs_exit(void)
{
debugfs_remove_recursive(sxe_debugfs_root);
}

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_debugfs.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_DEBUGFS_H__
#define __SXE_DEBUGFS_H__
struct sxe_adapter;
void sxe_debugfs_entries_init(struct sxe_adapter *adapter);
void sxe_debugfs_entries_exit(struct sxe_adapter *adapter);
void sxe_debugfs_init(void);
void sxe_debugfs_exit(void);
#endif

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_errno.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_ERRNO_H__
#define __SXE_ERRNO_H__
#define SXE_ERR_MODULE_STANDARD 0
#define SXE_ERR_MODULE_PF 1
#define SXE_ERR_MODULE_VF 2
#define SXE_ERR_MODULE_HDC 3
#define SXE_ERR_MODULE_OFFSET 16
#define SXE_ERR_MODULE(module, errcode) \
(((module) << SXE_ERR_MODULE_OFFSET) | (errcode))
#define SXE_ERR_PF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode)
#define SXE_ERR_VF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
#define SXE_ERR_HDC(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
#define SXE_ERR_CONFIG EINVAL
#define SXE_ERR_PARAM EINVAL
#define SXE_ERR_RESET_FAILED EPERM
#define SXE_ERR_NO_SPACE ENOSPC
#define SXE_ERR_FNAV_CMD_INCOMPLETE EBUSY
#define SXE_ERR_MBX_LOCK_FAIL EBUSY
#define SXE_ERR_OPRATION_NOT_PERM EPERM
#define SXE_ERR_LINK_STATUS_INVALID EINVAL
#define SXE_ERR_LINK_SPEED_INVALID EINVAL
#define SXE_ERR_DEVICE_NOT_SUPPORTED EOPNOTSUPP
#define SXE_ERR_HDC_LOCK_BUSY EBUSY
#define SXE_ERR_HDC_FW_OV_TIMEOUT ETIMEDOUT
#define SXE_ERR_MDIO_CMD_TIMEOUT ETIMEDOUT
#define SXE_ERR_INVALID_LINK_SETTINGS EINVAL
#define SXE_ERR_FNAV_REINIT_FAILED EIO
#define SXE_ERR_CLI_FAILED EIO
#define SXE_ERR_MASTER_REQUESTS_PENDING SXE_ERR_PF(1)
#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT SXE_ERR_PF(2)
#define SXE_ERR_ENABLE_SRIOV_FAIL SXE_ERR_PF(3)
#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_PF(4)
#define SXE_ERR_SFP_NOT_PERSENT SXE_ERR_PF(5)
#define SXE_ERR_PHY_NOT_PERSENT SXE_ERR_PF(6)
#define SXE_ERR_PHY_RESET_FAIL SXE_ERR_PF(7)
#define SXE_ERR_FC_NOT_NEGOTIATED SXE_ERR_PF(8)
#define SXE_ERR_SFF_NOT_SUPPORTED SXE_ERR_PF(9)
#define SXEVF_ERR_MAC_ADDR_INVALID EINVAL
#define SXEVF_ERR_RESET_FAILED EIO
#define SXEVF_ERR_ARGUMENT_INVALID EINVAL
#define SXEVF_ERR_NOT_READY EBUSY
#define SXEVF_ERR_POLL_ACK_FAIL EIO
#define SXEVF_ERR_POLL_MSG_FAIL EIO
#define SXEVF_ERR_MBX_LOCK_FAIL EBUSY
#define SXEVF_ERR_REPLY_INVALID EINVAL
#define SXEVF_ERR_IRQ_NUM_INVALID EINVAL
#define SXEVF_ERR_PARAM EINVAL
#define SXEVF_ERR_MAILBOX_FAIL SXE_ERR_VF(1)
#define SXEVF_ERR_MSG_HANDLE_ERR SXE_ERR_VF(2)
#define SXEVF_ERR_DEVICE_NOT_SUPPORTED SXE_ERR_VF(3)
#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_VF(4)
#endif

File diff suppressed because it is too large

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ethtool.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_ETHTOOL_H__
#define __SXE_ETHTOOL_H__
#include <linux/ethtool.h>
#include "sxe.h"
#define SXE_FNAV_RULES_TABLE_SIZE_UNIT (1024)
#define UDP_RSS_FLAGS (SXE_RSS_FIELD_IPV4_UDP | SXE_RSS_FIELD_IPV6_UDP)
#define SXE_SFP_INIT_WAIT_ITR_MIN (1000)
#define SXE_SFP_INIT_WAIT_ITR_MAX (2000)
#define SXE_TEST_GSTRING_ARRAY_SIZE sxe_self_test_suite_num_get()
#define SXE_RX_RING_NUM netdev->num_tx_queues
#define SXE_STATS_ARRAY_SIZE sxe_stats_num_get()
#define SXE_RING_STATS_LEN \
((netdev->num_tx_queues + SXE_RX_RING_NUM) * \
(sizeof(struct sxe_ring_stats) / sizeof(u64)))
#define SXE_DBU_PKT_BUF_STATS_LEN \
((sizeof(((struct sxe_adapter *)0)->stats.hw.dburxtcin) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.dburxtcout) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.dburxgdreecnt) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.dburxdrofpcnt) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.dbutxtcin) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.dbutxtcout)) / \
sizeof(u64))
#define SXE_DMA_QUEUE_STATS_NUM 5
#define SXE_DMA_QUEUE_STATS_LEN (SXE_DCB_8_TC * SXE_DMA_QUEUE_STATS_NUM)
#define SXE_DMA_STATS_LEN (SXE_DMA_QUEUE_STATS_LEN + sxe_dma_stats_num_get())
#define SXE_FC_STATS_LEN \
((sizeof(((struct sxe_adapter *)0)->stats.hw.prcpf) + \
sizeof(((struct sxe_adapter *)0)->stats.hw.pfct)) / \
sizeof(u64))
#define SXE_STATS_LEN \
(SXE_STATS_ARRAY_SIZE + SXE_RING_STATS_LEN + \
SXE_DBU_PKT_BUF_STATS_LEN + SXE_DMA_STATS_LEN + SXE_FC_STATS_LEN)
#define SXE_PRIV_FLAGS_LEGACY_RX BIT(0)
#ifdef SXE_IPSEC_CONFIGURE
#define SXE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
#endif
#define SXE_PRIV_FLAGS_STR_LEN sxe_priv_flags_num_get()
#define SXE_ETHTOOL_DUMP_REGS_NUM (SXE_MAC_REGS_NUM + SXE_MAC_STATS_REGS_NUM)
#define SXE_ETHTOOL_DUMP_REGS_LEN \
(SXE_MAC_REGS_VAL_LEN + SXE_MAC_STATS_REGS_VAL_LEN)
#define SXE_MAC_REGS_NUM sxe_mac_reg_num_get()
#define SXE_MAC_REGS_VAL_LEN (SXE_MAC_REGS_NUM * sizeof(u32))
#define SXE_MAC_STATS_REGS_NUM sxe_mac_stats_regs_num_get()
#define SXE_MAC_STATS_REGS_VAL_LEN (sizeof(u64) * SXE_MAC_STATS_REGS_NUM)
#define SXE_LOOPBACK_TEST_DESC_COUNT 64
#define SXE_LOOPBACK_TEST_LOOP 2
#define SXE_LOOPBACK_TEST_FRAME_SIZE 1024
#define SXE_LPBK_TX_DISB_WAIT_MIN (10000)
#define SXE_LPBK_TX_DISB_WAIT_MAX (20000)
#define SXE_NIC_RESET_WAIT_MIN (1000)
#define SXE_NIC_RESET_WAIT_MAX (2000)
enum { NETDEV_STATS, SXE_STATS };
struct sxe_ethtool_stats {
s8 stat_string[ETH_GSTRING_LEN];
u32 type;
u32 sizeof_stat;
u32 stat_offset;
};
struct sxe_mac_stats_info {
s8 stat_string[ETH_GSTRING_LEN];
u32 stat_offset;
};
u32 sxe_dma_stats_num_get(void);
u32 sxe_mac_stats_regs_num_get(void);
u32 sxe_self_test_suite_num_get(void);
u32 sxe_stats_num_get(void);
u32 sxe_priv_flags_num_get(void);
void sxe_ethtool_ops_set(struct net_device *netdev);
s32 sxe_fnav_dest_queue_parse(struct sxe_adapter *adapter, u64 ring_cookie,
u8 *queue);
s32 sxe_fw_wol_set(struct sxe_adapter *adapter, u32 enable);
#endif

@ -0,0 +1,995 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_filter.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include "sxe_filter.h"
#include "sxe.h"
#include "sxe_hw.h"
#include "sxe_regs.h"
#include "sxe_tx_proc.h"
#include "sxe_ethtool.h"
#include "sxe_sriov.h"
extern struct workqueue_struct *sxe_fnav_workqueue;
extern struct kmem_cache *fnav_cache;
#define SXE_FNAV_BKT_HASH_MASK 0x1FFF
#define SXE_FNAV_HASH_REG_MASK 0xFFFFFFFF
#define SXE_SAMPLE_WORD_BITS (16)
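/* Mirror every in-use unicast filter entry into @pool_idx so a pool in promiscuous mode also receives the other pools' unicast traffic. */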
void sxe_uc_addr_promisc_add(struct sxe_hw *hw, u16 pool_idx)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *uc_table =
adapter->mac_filter_ctxt.uc_addr_table;
struct sxe_uc_addr_table *entry;
u32 i;
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (test_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state) &&
entry->pool != pool_idx)
hw->filter.mac.ops->uc_addr_pool_add(hw, i, pool_idx);
}
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
}
void sxe_uc_addr_promisc_del(struct sxe_hw *hw, u16 pool_idx)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *uc_table =
adapter->mac_filter_ctxt.uc_addr_table;
struct sxe_uc_addr_table *entry;
u32 i;
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (test_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state) &&
entry->pool != pool_idx)
hw->filter.mac.ops->uc_addr_pool_del(hw, i, pool_idx);
}
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
}
void sxe_uc_addr_reuse_add(struct sxe_hw *hw, u32 rar_idx, const u8 *addr,
u16 pool_idx)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *uc_table =
adapter->mac_filter_ctxt.uc_addr_table;
struct sxe_uc_addr_table *entry;
struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt;
u32 i;
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (test_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state) &&
ether_addr_equal(addr, entry->addr) &&
entry->pool != pool_idx) {
hw->filter.mac.ops->uc_addr_pool_add(hw, i, pool_idx);
hw->filter.mac.ops->uc_addr_pool_add(hw, rar_idx,
entry->pool);
}
}
for (i = 0; i < vt_ctxt->num_vfs; i++) {
struct sxe_vf_info *vf_info = &vt_ctxt->vf_info[i];
if (vf_info->cast_mode == SXE_CAST_MODE_PROMISC)
hw->filter.mac.ops->uc_addr_pool_add(hw, rar_idx, i);
}
}
void sxe_uc_addr_reuse_del(struct sxe_hw *hw, const u8 *addr, u16 pool_idx)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *uc_table =
adapter->mac_filter_ctxt.uc_addr_table;
struct sxe_uc_addr_table *entry;
u32 i;
if (pool_idx == PF_POOL_INDEX(0))
hw->filter.mac.ops->uc_addr_pool_disable(hw, SXE_DEFAULT_UC_ADDR_IDX);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (test_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state) &&
ether_addr_equal(addr, entry->addr))
hw->filter.mac.ops->uc_addr_pool_del(hw, i, pool_idx);
}
}
s32 sxe_uc_addr_add(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table,
const u8 *addr, u16 pool)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *entry;
s32 ret;
u32 i;
if (is_zero_ether_addr(addr)) {
ret = -EINVAL;
LOG_ERROR_BDF("mac addr is zero.(err:%d)\n", ret);
goto l_out;
}
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (!test_and_set_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state)) {
ether_addr_copy(entry->addr, addr);
entry->pool = pool;
hw->filter.mac.ops->uc_addr_add(hw, i, entry->addr,
entry->pool);
sxe_uc_addr_reuse_add(hw, i, entry->addr, entry->pool);
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
ret = i;
LOG_INFO("mac addr:%pM pool:%u add to\n"
"\tuc_table[%u] success.\n",
addr, pool, i);
goto l_out;
}
}
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
ret = -ENOMEM;
LOG_ERROR_BDF("index:%u mac addr:%pM pool:%u add to uc filter fail.\n",
i, addr, pool);
l_out:
return ret;
}
s32 sxe_uc_addr_del(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table,
const u8 *addr, u16 pool)
{
struct sxe_adapter *adapter = hw->adapter;
struct sxe_uc_addr_table *entry;
s32 ret = 0;
u32 i;
if (is_zero_ether_addr(addr)) {
ret = -EINVAL;
LOG_WARN_BDF("mac addr is zero.(err:%d)\n", ret);
goto l_out;
}
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &uc_table[i];
if (entry->pool == pool &&
ether_addr_equal(addr, entry->addr)) {
if (test_and_clear_bit(SXE_UC_ADDR_ENTRY_USED,
&entry->state)) {
hw->filter.mac.ops->uc_addr_del(hw, i);
sxe_uc_addr_reuse_del(hw, addr, pool);
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
LOG_INFO("pool:%u mac addr:%pM uc_filter_addr[%u]\n"
"\tentry del success.\n", pool, addr, i);
goto l_out;
}
}
}
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
ret = -ENOMEM;
LOG_ERROR_BDF("index:%u mac addr:%pM pool:%u delete fail due to\n"
"\tnot exsit in uc filter.\n", i, addr, pool);
l_out:
return ret;
}
s32 sxe_uc_sync(struct net_device *netdev, const u8 *addr)
{
s32 ret;
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_hw *hw = &adapter->hw;
struct sxe_uc_addr_table *uc_table =
adapter->mac_filter_ctxt.uc_addr_table;
ret = sxe_uc_addr_add(hw, uc_table, addr, PF_POOL_INDEX(0));
return min_t(s32, ret, 0);
}
s32 sxe_uc_unsync(struct net_device *netdev, const u8 *addr)
{
s32 ret;
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_hw *hw = &adapter->hw;
ret = sxe_uc_addr_del(hw, adapter->mac_filter_ctxt.uc_addr_table, addr,
PF_POOL_INDEX(0));
if (ret)
LOG_ERROR_BDF("pool idx:%d addr:%pM del fail.\n",
PF_POOL_INDEX(0), addr);
return 0;
}
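/* Add a multicast address to the software MTA shadow: a 12-bit value extracted from the last one and a half bytes of the MAC selects the hash table register and bit position. */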
static void sxe_mc_hash_table_add(struct sxe_adapter *adapter, u8 *mc_addr)
{
u16 extracted;
u16 bit_index;
u16 reg_index;
adapter->mac_filter_ctxt.mc_hash_table_used++;
extracted = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
extracted &= SXE_MC_ADDR_EXTRACT_MASK;
LOG_DEV_DEBUG(" bit-vector = 0x%03X\n", extracted);
reg_index = (extracted >> SXE_MC_ADDR_SHIFT) & SXE_MC_ADDR_REG_MASK;
bit_index = extracted & SXE_MC_ADDR_BIT_MASK;
adapter->mac_filter_ctxt.mc_hash_table[reg_index] |= BIT(bit_index);
LOG_INFO("mc_addr:%pM extracted:0x%x reg_index:%u bit_index:%u\n"
"\tadd to mc_hash_table success.\n",
mc_addr, extracted, reg_index, bit_index);
}
#ifdef CONFIG_PCI_IOV
void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt;
u8 i;
u8 j;
u8 reg_idx;
u8 bit_idx;
u32 filter_ctl;
for (i = 0; i < vt_ctxt->num_vfs; i++) {
struct sxe_vf_info *vf_info = &vt_ctxt->vf_info[i];
for (j = 0; j < vf_info->mc_hash_used; j++) {
reg_idx = (vf_info->mc_hash[j] >> SXE_MC_ADDR_SHIFT) &
SXE_MC_ADDR_REG_MASK;
bit_idx = vf_info->mc_hash[j] & SXE_MC_ADDR_BIT_MASK;
hw->filter.mac.ops->mta_hash_table_update(hw, reg_idx,
bit_idx);
LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x\n"
"\treg_idx=%u, bit_idx=%u.\n",
i, vf_info->mc_hash_used, j,
vf_info->mc_hash[j], reg_idx, bit_idx);
}
filter_ctl = hw->filter.mac.ops->pool_rx_mode_get(hw, i);
if (vf_info->mc_hash_used)
filter_ctl |= SXE_VMOLR_ROMPE;
else
filter_ctl &= ~SXE_VMOLR_ROMPE;
hw->filter.mac.ops->pool_rx_mode_set(hw, filter_ctl, i);
}
}
#else
void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter)
{
}
#endif
s32 sxe_mc_addr_add(struct net_device *netdev)
{
struct sxe_adapter *adapter = netdev_priv(netdev);
struct sxe_hw *hw = &adapter->hw;
struct netdev_hw_addr *hw_addr;
struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
u8 i;
if (!netif_running(netdev))
return 0;
LOG_DEV_DEBUG("clearing MTA.\n");
mac_filter->mc_hash_table_used = 0;
memset(mac_filter->mc_hash_table, 0, sizeof(mac_filter->mc_hash_table));
netdev_for_each_mc_addr(hw_addr, netdev) {
LOG_DEV_DEBUG("adding the multicast addresses:\n");
sxe_mc_hash_table_add(adapter, hw_addr->addr);
}
for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
hw->filter.mac.ops->mta_hash_table_set(hw, i,
mac_filter->mc_hash_table[i]);
}
if (mac_filter->mc_hash_table_used)
hw->filter.mac.ops->mc_filter_enable(hw);
sxe_vf_mc_addr_restore(adapter);
LOG_DEV_DEBUG("add multicast address complete.\n");
return netdev_mc_count(netdev);
}
void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
u8 mac_addr[ETH_ALEN];
memcpy(mac_addr, adapter->mac_filter_ctxt.cur_mac_addr, ETH_ALEN);
hw->filter.mac.ops->fc_mac_addr_set(hw, mac_addr);
}
void sxe_mac_addr_set(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
struct sxe_uc_addr_table *entry =
&adapter->mac_filter_ctxt.uc_addr_table[SXE_DEFAULT_UC_ADDR_IDX];
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
sxe_uc_addr_reuse_del(hw, entry->addr, PF_POOL_INDEX(0));
memcpy(&entry->addr, adapter->mac_filter_ctxt.cur_mac_addr, ETH_ALEN);
entry->pool = PF_POOL_INDEX(0);
set_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state);
hw->filter.mac.ops->uc_addr_add(hw, SXE_DEFAULT_UC_ADDR_IDX,
entry->addr, entry->pool);
sxe_uc_addr_reuse_add(hw, SXE_DEFAULT_UC_ADDR_IDX, entry->addr,
entry->pool);
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
sxe_fc_mac_addr_set(adapter);
}
static s32 sxe_uc_filter_init(struct sxe_adapter *adapter)
{
s32 ret = 0;
struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
mac_filter->uc_addr_table =
kcalloc(SXE_UC_ENTRY_NUM_MAX, sizeof(struct sxe_uc_addr_table),
GFP_KERNEL);
if (!mac_filter->uc_addr_table) {
ret = -ENOMEM;
LOG_ERROR_BDF("rar entry:%d size:%lu mac table kcalloc fail.(err:%d)",
SXE_UC_ENTRY_NUM_MAX, sizeof(struct sxe_uc_addr_table),
ret);
}
spin_lock_init(&adapter->mac_filter_ctxt.uc_table_lock);
return ret;
}
s32 sxe_mac_filter_init(struct sxe_adapter *adapter)
{
s32 ret;
ret = sxe_uc_filter_init(adapter);
if (ret) {
LOG_ERROR_BDF("uc filter init failed\n");
goto l_ret;
}
l_ret:
return ret;
}
void sxe_mac_filter_reset(struct sxe_adapter *adapter)
{
struct sxe_uc_addr_table *entry;
u32 i;
spin_lock(&adapter->mac_filter_ctxt.uc_table_lock);
for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
entry = &adapter->mac_filter_ctxt.uc_addr_table[i];
clear_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state);
}
spin_unlock(&adapter->mac_filter_ctxt.uc_table_lock);
adapter->mac_filter_ctxt.mc_hash_table_used = 0;
}
void sxe_mac_filter_destroy(struct sxe_adapter *adapter)
{
kfree(adapter->mac_filter_ctxt.uc_addr_table);
}
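/* Re-program every software-tracked specific FNAV rule (and the shared rule mask) into hardware, e.g. after a device reset. */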
void sxe_fnav_rules_restore(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
struct hlist_node *node;
struct sxe_fnav_rule_node *rule;
u64 ring_cookie;
u8 queue;
spin_lock(&adapter->fnav_ctxt.specific_lock);
if (!hlist_empty(&adapter->fnav_ctxt.rules_list)) {
hw->dbu.ops->fnav_specific_rule_mask_set(hw,
&adapter->fnav_ctxt.rules_mask);
hlist_for_each_entry_safe(rule, node,
&adapter->fnav_ctxt.rules_list, node) {
ring_cookie = rule->ring_cookie;
sxe_fnav_dest_queue_parse(adapter, ring_cookie, &queue);
hw->dbu.ops->fnav_specific_rule_add(hw,
&rule->rule_info, rule->sw_idx, queue);
}
}
spin_unlock(&adapter->fnav_ctxt.specific_lock);
}
void sxe_fnav_rules_clean(struct sxe_adapter *adapter)
{
struct hlist_node *container_node;
struct sxe_fnav_rule_node *rule;
spin_lock(&adapter->fnav_ctxt.specific_lock);
hlist_for_each_entry_safe(rule, container_node,
&adapter->fnav_ctxt.rules_list, node) {
hlist_del(&rule->node);
kfree(rule);
}
adapter->fnav_ctxt.rule_cnt = 0;
spin_unlock(&adapter->fnav_ctxt.specific_lock);
}
static bool sxe_fnav_is_sample_protocol_supported(__be16 protocol)
{
return protocol == htons(ETH_P_IP) || protocol == htons(ETH_P_IPV6);
}
static s32 sxe_fnav_sample_header_len_check(struct sk_buff *skb,
union sxe_sample_data_hdr *hdr)
{
s32 ret = 0;
if (unlikely(hdr->network <= skb->data)) {
ret = -SXE_ERR_PARAM;
LOG_DEBUG("hdr.network <= skb->data\n");
goto l_end;
}
if (unlikely(skb_tail_pointer(skb) < hdr->network + 40)) {
ret = -SXE_ERR_PARAM;
LOG_DEBUG("skb_tail_pointer(skb) < hdr->network + 40\n");
goto l_end;
}
l_end:
return ret;
}
static s32 sxe_fnav_sample_tcp_ip_header_check(union sxe_sample_data_hdr *hdr,
struct sk_buff *skb,
unsigned int *hlen)
{
int l4_proto;
s32 ret = 0;
switch (hdr->ipv4->version) {
case SXE_IPV4:
*hlen = (hdr->network[0] & 0x0F) << 2;
l4_proto = hdr->ipv4->protocol;
break;
case SXE_IPV6:
*hlen = hdr->network - skb->data;
l4_proto = ipv6_find_hdr(skb, hlen, IPPROTO_TCP, NULL, NULL);
*hlen -= hdr->network - skb->data;
break;
default:
ret = -SXE_ERR_PARAM;
LOG_ERROR("unsupported l3 protocol:%d\n", hdr->ipv4->version);
goto l_end;
}
if (l4_proto != IPPROTO_TCP) {
ret = -SXE_ERR_PARAM;
LOG_INFO("unsupported l4 protocol:%d\n", l4_proto);
goto l_end;
}
if (unlikely(skb_tail_pointer(skb) <
hdr->network + *hlen + sizeof(struct tcphdr))) {
ret = -SXE_ERR_PARAM;
LOG_ERROR("error on length skb_tail_pointer=0x%p <\n"
"\t(hdr->network + *hlen + sizeof(struct tcphdr))=0x%p\n",
skb_tail_pointer(skb),
(hdr->network + *hlen + sizeof(struct tcphdr)));
goto l_end;
}
l_end:
return ret;
}
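/* Fold bit n of the low/high hash dwords into the common, bucket and signature hashes according to the corresponding key bits. */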
static void sxe_sample_hash_iter_compute(u8 bit_n, u32 *common_hash,
u32 *bucket_hash, u32 *sig_hash,
u32 lo_hash_dword, u32 hi_hash_dword)
{
u32 n = bit_n;
if (SXE_SAMPLE_COMMON_HASH_KEY & BIT(n))
*common_hash ^= lo_hash_dword >> n;
else if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n))
*bucket_hash ^= lo_hash_dword >> n;
else if (SXE_FNAV_SAMPLE_HASH_KEY & BIT(n))
*sig_hash ^= lo_hash_dword << (SXE_SAMPLE_WORD_BITS - n);
if (SXE_SAMPLE_COMMON_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS))
*common_hash ^= hi_hash_dword >> n;
else if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS))
*bucket_hash ^= hi_hash_dword >> n;
else if (SXE_FNAV_SAMPLE_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS))
*sig_hash ^= hi_hash_dword << (SXE_SAMPLE_WORD_BITS - n);
}
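/* Compute the signature-mode FNAV hash: the bucket hash (low 16 bits) selects the filter bucket, the signature hash (high 16 bits) disambiguates entries within it. */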
static u32 sxe_sample_hash_compute(union sxe_sample_hash_dword input,
union sxe_sample_hash_dword common)
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
u8 i;
flow_vm_vlan = ntohl(input.dword);
hi_hash_dword = ntohl(common.dword);
lo_hash_dword = (hi_hash_dword >> SXE_SAMPLE_WORD_BITS) |
(hi_hash_dword << SXE_SAMPLE_WORD_BITS);
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> SXE_SAMPLE_WORD_BITS);
sxe_sample_hash_iter_compute(0, &common_hash, &bucket_hash, &sig_hash,
lo_hash_dword, hi_hash_dword);
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << SXE_SAMPLE_WORD_BITS);
for (i = 1; i < SXE_SAMPLE_WORD_BITS; i++) {
sxe_sample_hash_iter_compute(i, &common_hash, &bucket_hash,
&sig_hash, lo_hash_dword,
hi_hash_dword);
}
bucket_hash ^= common_hash;
bucket_hash &= SXE_SAMPLE_HASH_MASK;
sig_hash ^= common_hash << SXE_SAMPLE_WORD_BITS;
sig_hash &= SXE_SAMPLE_HASH_MASK << SXE_SAMPLE_WORD_BITS;
return sig_hash ^ bucket_hash;
}
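/* Record a hardware-installed sample (signature) rule in the software hash
 * table, deduplicating by hash value; on allocation failure the just-written
 * hardware entry is rolled back so hardware and software stay consistent.
 */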
static void sxe_fnav_sample_rule_add(struct sxe_adapter *adapter, u64 hash_cmd)
{
struct sxe_hw *hw = &adapter->hw;
struct sxe_fnav_sample_filter *input, *filter;
u32 key;
input = kzalloc(sizeof(*input), GFP_ATOMIC);
if (!input) {
LOG_ERROR_BDF("fnav sample rule add failed, no memory\n");
hw->dbu.ops->fnav_single_sample_rule_del(hw,
(u32)(hash_cmd & SXE_FNAV_HASH_REG_MASK));
goto l_end;
}
key = (hash_cmd & SXE_FNAV_BKT_HASH_MASK);
input->hash = (u32)(hash_cmd & SXE_FNAV_HASH_REG_MASK);
spin_lock(&adapter->fnav_ctxt.sample_lock);
hash_for_each_possible(adapter->fnav_ctxt.sample_list, filter, hlist,
key) {
if (filter->hash == input->hash) {
kfree(input);
goto l_unlock;
}
}
hash_add(adapter->fnav_ctxt.sample_list, &input->hlist, key);
adapter->fnav_ctxt.sample_rules_cnt++;
l_unlock:
spin_unlock(&adapter->fnav_ctxt.sample_lock);
l_end:
;
}
static void sxe_fnav_sample_rule_add_task(struct work_struct *work)
{
struct sxe_fnav_sample_work_info *sample_work =
container_of(work, struct sxe_fnav_sample_work_info, work_st);
if (!sample_work)
goto l_end;
sxe_fnav_sample_rule_add(sample_work->adapter, sample_work->hash);
kmem_cache_free(fnav_cache, sample_work);
l_end:
;
}
s32 sxe_fnav_sample_rule_get(struct sxe_ring *ring,
struct sxe_tx_buffer *tx_buffer)
{
struct sxe_irq_data *irq_data = ring->irq_data;
struct sxe_hw *hw;
struct sxe_adapter *adapter;
union sxe_sample_hash_dword input = { .dword = 0 };
union sxe_sample_hash_dword common = { .dword = 0 };
union sxe_sample_data_hdr hdr;
struct tcphdr *th;
unsigned int hlen;
struct sk_buff *skb;
__be16 vlan_id;
bool is_supported;
s32 ret;
u32 hash_value;
u64 hash_cmd;
struct sxe_fnav_sample_work_info *add_work = NULL;
/* validate irq_data before dereferencing it; adapter and hw are only
 * derived once the check has passed
 */
if (!irq_data || !ring->fnav_sample_rate)
goto l_end;
hw = &irq_data->adapter->hw;
adapter = hw->adapter;
LOG_DEBUG_BDF("in sample mode, sample_rate=%u, fnav_sample_count=%u\n",
ring->fnav_sample_rate, ring->fnav_sample_count);
ring->fnav_sample_count++;
is_supported =
sxe_fnav_is_sample_protocol_supported(tx_buffer->protocol);
if (!is_supported) {
LOG_DEBUG_BDF("sample protocol=[%d] unsupported\n",
tx_buffer->protocol);
goto l_end;
}
skb = tx_buffer->skb;
hdr.network = skb_network_header(skb);
ret = sxe_fnav_sample_header_len_check(skb, &hdr);
if (ret) {
LOG_ERROR_BDF("sample header len check failed. ret=%d\n", ret);
goto l_end;
}
ret = sxe_fnav_sample_tcp_ip_header_check(&hdr, skb, &hlen);
if (ret) {
LOG_INFO("sample tcp ip process err. ret=%d\n", ret);
goto l_end;
}
th = (struct tcphdr *)(hdr.network + hlen);
LOG_DEBUG_BDF("tcp is fin ? :%s, is syn ? :%s\n",
th->fin ? "yes" : "no", th->syn ? "yes" : "no");
if (th->fin ||
(!th->syn && ring->fnav_sample_count < ring->fnav_sample_rate))
goto l_end;
ring->fnav_sample_count = 0;
vlan_id = htons(tx_buffer->tx_features >> SXE_TX_FEATURE_VLAN_SHIFT);
input.formatted.vlan_id = vlan_id;
if (tx_buffer->tx_features &
(SXE_TX_FEATURE_SW_VLAN | SXE_TX_FEATURE_HW_VLAN))
common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ tx_buffer->protocol;
common.port.dst ^= th->source;
switch (hdr.ipv4->version) {
case SXE_IPV4:
input.formatted.flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
break;
case SXE_IPV6:
input.formatted.flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV6;
common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
hdr.ipv6->saddr.s6_addr32[1] ^
hdr.ipv6->saddr.s6_addr32[2] ^
hdr.ipv6->saddr.s6_addr32[3] ^
hdr.ipv6->daddr.s6_addr32[0] ^
hdr.ipv6->daddr.s6_addr32[1] ^
hdr.ipv6->daddr.s6_addr32[2] ^
hdr.ipv6->daddr.s6_addr32[3];
break;
default:
break;
}
LOG_DEBUG_BDF("fnav sample success, start write hw\n");
hash_value = sxe_sample_hash_compute(input, common);
hw->dbu.ops->fnav_sample_hash_cmd_get(hw, input.formatted.flow_type,
hash_value, ring->idx, &hash_cmd);
if (!adapter->fnav_ctxt.is_sample_table_overflowed &&
!workqueue_congested(WORK_CPU_UNBOUND, sxe_fnav_workqueue)) {
add_work = kmem_cache_zalloc(fnav_cache, GFP_ATOMIC);
if (!add_work)
return -ENOMEM;
INIT_WORK(&add_work->work_st, sxe_fnav_sample_rule_add_task);
add_work->adapter = adapter;
add_work->hash = hash_cmd;
queue_work(sxe_fnav_workqueue, &add_work->work_st);
hw->dbu.ops->fnav_sample_hash_set(hw, hash_cmd);
}
l_end:
return 0;
}
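/* Insert a specific (ntuple) rule into the software list, which is kept
 * sorted by sw_idx; an existing rule at the same index is removed from
 * hardware when its bucket hash differs, then replaced in the list.
 */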
static void sxe_fnav_sw_specific_rule_add(struct sxe_adapter *adapter,
struct sxe_fnav_rule_node *add_rule)
{
struct sxe_hw *hw = &adapter->hw;
struct hlist_node *next;
struct sxe_fnav_rule_node *rule = NULL;
struct sxe_fnav_rule_node *pre_node = NULL;
u16 sw_idx = add_rule->sw_idx;
s32 ret;
hlist_for_each_entry_safe(rule, next, &adapter->fnav_ctxt.rules_list,
node) {
if (rule->sw_idx >= sw_idx)
break;
pre_node = rule;
}
LOG_DEBUG_BDF("add specific fnav rule in sw_idx[%u]\n", sw_idx);
if (rule && rule->sw_idx == sw_idx) {
LOG_DEBUG_BDF("rule->sw_idx == sw_idx == %u, show bkt_hash.\n"
"\told bkt_hash[0x%x], input new bkt_hash[0x%x]\n",
sw_idx, rule->rule_info.ntuple.bkt_hash,
add_rule->rule_info.ntuple.bkt_hash);
if (rule->rule_info.ntuple.bkt_hash !=
add_rule->rule_info.ntuple.bkt_hash) {
ret = hw->dbu.ops->fnav_specific_rule_del(hw,
&rule->rule_info, sw_idx);
if (ret) {
LOG_ERROR_BDF("delete fnav rule in sw_idx[%d]\n"
"\tfailed\n",
sw_idx);
}
}
hlist_del(&rule->node);
kfree(rule);
adapter->fnav_ctxt.rule_cnt--;
}
INIT_HLIST_NODE(&add_rule->node);
if (pre_node)
hlist_add_behind(&add_rule->node, &pre_node->node);
else
hlist_add_head(&add_rule->node, &adapter->fnav_ctxt.rules_list);
adapter->fnav_ctxt.rule_cnt++;
}
static void sxe_fnav_specific_hash_iter_compute(u8 bit_n, u32 *bucket_hash,
u32 lo_hash_dword,
u32 hi_hash_dword)
{
u32 n = bit_n;
if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n))
*bucket_hash ^= lo_hash_dword >> n;
if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n + 16))
*bucket_hash ^= hi_hash_dword >> n;
}
static void sxe_fnav_specific_hash_compute(union sxe_fnav_rule_info *input_rule,
union sxe_fnav_rule_info *input_mask)
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 bucket_hash = 0;
__be32 hi_dword = 0;
u8 i;
for (i = 0; i <= 10; i++)
input_rule->fast_access[i] &= input_mask->fast_access[i];
flow_vm_vlan = ntohl(input_rule->fast_access[0]);
for (i = 1; i <= 10; i++)
hi_dword ^= input_rule->fast_access[i];
hi_hash_dword = ntohl(hi_dword);
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
sxe_fnav_specific_hash_iter_compute(0, &bucket_hash, lo_hash_dword,
hi_hash_dword);
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
for (i = 1; i <= 15; i++) {
sxe_fnav_specific_hash_iter_compute(i, &bucket_hash,
lo_hash_dword, hi_hash_dword);
}
input_rule->ntuple.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
LOG_DEBUG("fnav bkt_hash=0x%x\n", input_rule->ntuple.bkt_hash);
}
s32 sxe_fnav_specific_rule_add_process(struct sxe_adapter *adapter,
struct sxe_fnav_rule_node *input_rule,
union sxe_fnav_rule_info *mask, u8 queue)
{
s32 ret = 0;
struct sxe_hw *hw = &adapter->hw;
spin_lock(&adapter->fnav_ctxt.specific_lock);
LOG_DEBUG_BDF("add specific fnav mask---rule_info:vm_pool[%u],\n"
"\tflow_type[0x%x], vlan_id[%u], dst_ip[%x:%x:%x:%x],\n"
"\tsrc_ip[%x:%x:%x:%x], dst_port[%u], src_port[%u],\n"
"\tflex_bytes[0x%x], bkt_hash[0x%x]\n",
mask->ntuple.vm_pool, mask->ntuple.flow_type,
mask->ntuple.vlan_id, mask->ntuple.dst_ip[0],
mask->ntuple.dst_ip[1], mask->ntuple.dst_ip[2],
mask->ntuple.dst_ip[3], mask->ntuple.src_ip[0],
mask->ntuple.src_ip[1], mask->ntuple.src_ip[2],
mask->ntuple.src_ip[3], mask->ntuple.dst_port,
mask->ntuple.src_port, mask->ntuple.flex_bytes,
mask->ntuple.bkt_hash);
if (hlist_empty(&adapter->fnav_ctxt.rules_list)) {
LOG_DEBUG_BDF("new fnav mask added\n");
memcpy(&adapter->fnav_ctxt.rules_mask, mask, sizeof(*mask));
ret = hw->dbu.ops->fnav_specific_rule_mask_set(hw, mask);
if (ret) {
LOG_MSG_ERR(drv, "error writing mask\n");
goto l_err_unlock;
}
} else if (memcmp(&adapter->fnav_ctxt.rules_mask, mask,
sizeof(*mask))) {
LOG_MSG_ERR(drv, "only one mask supported per port\n");
goto l_err_unlock;
}
sxe_fnav_specific_hash_compute(&input_rule->rule_info, mask);
LOG_DEBUG_BDF("add specific fnav rule---filter:vm_pool[%u],\n"
"\tflow_type[0x%x], vlan_id[%u], dst_ip[%x:%x:%x:%x],\n"
"\tsrc_ip[%x:%x:%x:%x], dst_port[%u], src_port[%u],\n"
"\tflex_bytes[0x%x], bkt_hash[0x%x],\n"
"\tsw_idx[%u], ring_cookie[0x%llx]\n",
input_rule->rule_info.ntuple.vm_pool,
input_rule->rule_info.ntuple.flow_type,
input_rule->rule_info.ntuple.vlan_id,
input_rule->rule_info.ntuple.dst_ip[0],
input_rule->rule_info.ntuple.dst_ip[1],
input_rule->rule_info.ntuple.dst_ip[2],
input_rule->rule_info.ntuple.dst_ip[3],
input_rule->rule_info.ntuple.src_ip[0],
input_rule->rule_info.ntuple.src_ip[1],
input_rule->rule_info.ntuple.src_ip[2],
input_rule->rule_info.ntuple.src_ip[3],
input_rule->rule_info.ntuple.dst_port,
input_rule->rule_info.ntuple.src_port,
input_rule->rule_info.ntuple.flex_bytes,
input_rule->rule_info.ntuple.bkt_hash, input_rule->sw_idx,
input_rule->ring_cookie);
ret = hw->dbu.ops->fnav_specific_rule_add(hw, &input_rule->rule_info,
input_rule->sw_idx, queue);
if (ret) {
LOG_ERROR_BDF("set specific rule failed, ret = %d\n", ret);
goto l_err_unlock;
}
sxe_fnav_sw_specific_rule_add(adapter, input_rule);
spin_unlock(&adapter->fnav_ctxt.specific_lock);
return 0;
l_err_unlock:
ret = -EINVAL;
spin_unlock(&adapter->fnav_ctxt.specific_lock);
return ret;
}
int sxe_fnav_sw_specific_rule_del(struct sxe_adapter *adapter, u16 sw_idx)
{
struct sxe_hw *hw = &adapter->hw;
struct hlist_node *next;
struct sxe_fnav_rule_node *rule = NULL;
int ret = -EINVAL;
hlist_for_each_entry_safe(rule, next, &adapter->fnav_ctxt.rules_list,
node) {
if (rule->sw_idx >= sw_idx) {
LOG_INFO("rule->sw_idx = %u; sw_idx = %u\n",
rule->sw_idx, sw_idx);
break;
}
}
if (rule && rule->sw_idx == sw_idx) {
LOG_DEBUG_BDF("delete rule in sw_idx[%u]\n", sw_idx);
ret = hw->dbu.ops->fnav_specific_rule_del(hw, &rule->rule_info,
sw_idx);
if (ret) {
LOG_ERROR_BDF("delete fnav rule in sw_idx[%d]\n"
"\tfailed\n",
sw_idx);
}
hlist_del(&rule->node);
kfree(rule);
adapter->fnav_ctxt.rule_cnt--;
} else {
LOG_ERROR_BDF("fnav rule in sw_idx[%u] not found\n", sw_idx);
}
return ret;
}
u64 sxe_fnav_max_rule_num_get(u32 rules_table_size)
{
return (u64)((SXE_FNAV_RULES_TABLE_SIZE_UNIT << rules_table_size) - 2);
}
struct sxe_fnav_rule_node *
sxe_fnav_specific_rule_find(struct sxe_adapter *adapter, u32 location)
{
struct sxe_fnav_rule_node *rule = NULL;
struct hlist_node *next;
hlist_for_each_entry_safe(rule, next, &adapter->fnav_ctxt.rules_list,
node) {
if (location <= rule->sw_idx) {
LOG_INFO("location = %u, sw_idx = %u\n", location,
rule->sw_idx);
break;
}
}
if (!rule || location != rule->sw_idx)
rule = NULL;
LOG_INFO("loc[%u] rule find finish and %s\n", location,
rule ? "found" : "not found");
return rule;
}

View File

@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_filter.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_FILTER_H__
#define __SXE_FILTER_H__
#include <net/ipv6.h>
#include <net/ip.h>
#include "sxe.h"
#define SXE_DEFAULT_UC_ADDR_IDX (0)
#define SXE_DEFAULT_MAC_POOL_IDX (0)
#define SXE_UC_ADDR_ENTRY_USED (0x1)
#define SXE_FNAV_DEFAULT_SAMPLE_RATE (200)
#define SXE_FNAV_RULES_TABLE_PKT_SIZE (32)
enum sxe_fnav_rules_table_size {
SXE_FNAV_RULES_TABLE_SIZE_NONE = 0,
SXE_FNAV_RULES_TABLE_SIZE_64K = 1,
SXE_FNAV_RULES_TABLE_SIZE_128K = 2,
SXE_FNAV_RULES_TABLE_SIZE_256K = 3,
};
union sxe_sample_data_hdr {
unsigned char *network;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
};
void sxe_uc_addr_promisc_add(struct sxe_hw *hw, u16 pool_idx);
void sxe_uc_addr_promisc_del(struct sxe_hw *hw, u16 pool_idx);
void sxe_uc_addr_reuse_add(struct sxe_hw *hw, u32 rar_idx, const u8 *addr,
u16 pool_idx);
void sxe_uc_addr_reuse_del(struct sxe_hw *hw, const u8 *addr, u16 pool_idx);
s32 sxe_uc_addr_add(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table,
const u8 *addr, u16 pool);
s32 sxe_uc_addr_del(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table,
const u8 *addr, u16 pool);
s32 sxe_mc_addr_add(struct net_device *netdev);
s32 sxe_mac_filter_init(struct sxe_adapter *adapter);
void sxe_mac_filter_destroy(struct sxe_adapter *adapter);
s32 sxe_uc_sync(struct net_device *netdev, const u8 *addr);
s32 sxe_uc_unsync(struct net_device *netdev, const u8 *addr);
void sxe_mac_filter_reset(struct sxe_adapter *adapter);
void sxe_mac_addr_set(struct sxe_adapter *adapter);
void sxe_fnav_rules_restore(struct sxe_adapter *adapter);
void sxe_fnav_rules_clean(struct sxe_adapter *adapter);
s32 sxe_fnav_sample_rule_get(struct sxe_ring *ring,
struct sxe_tx_buffer *first_buffer);
s32 sxe_fnav_specific_rule_add_process(struct sxe_adapter *adapter,
struct sxe_fnav_rule_node *input_rule,
union sxe_fnav_rule_info *mask,
u8 queue);
int sxe_fnav_sw_specific_rule_del(struct sxe_adapter *adapter, u16 sw_idx);
u64 sxe_fnav_max_rule_num_get(u32 rules_table_size);
struct sxe_fnav_rule_node *
sxe_fnav_specific_rule_find(struct sxe_adapter *adapter, u32 location);
void sxe_fc_mac_addr_set(struct sxe_adapter *adapter);
void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter);
#endif

View File

@ -0,0 +1,320 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_host_cli.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe.h"
#include "sxe_log.h"
#include "sxe_host_cli.h"
#include "sxe_host_hdc.h"
#include "sxe_ioctl.h"
static dev_t sxe_cdev_major;
static struct class *sxe_cdev_class;
/* serializes minor number allocation in sxe_minor_idr */
static struct mutex sxe_minor_lock;
static DEFINE_IDR(sxe_minor_idr);
static s32 sxe_cli_open(struct inode *inode, struct file *filep)
{
struct sxe_adapter *adapter;
adapter =
container_of(inode->i_cdev, struct sxe_adapter, cdev_info.cdev);
LOG_DEBUG_BDF("open char dev of adapter[%p]\n", adapter);
filep->private_data = adapter;
return 0;
}
static s32 sxe_cli_user_input_param_check(u64 trace_id, u8 *in_data, u16 in_len,
u8 *out_data, u16 out_len)
{
s32 ret = -EINVAL;
if (!in_data || !out_data) {
LOG_ERROR("trace_id=0x%llx cmd parameter invalid,\n"
"\tin_data=%p, out_data=%p\n",
trace_id, in_data, out_data);
goto l_out;
}
if (in_len == 0 || out_len == 0 || in_len > HDC_CACHE_TOTAL_LEN ||
out_len > HDC_CACHE_TOTAL_LEN) {
LOG_ERROR("trace_id=0x%llx cmd parameter invalid,\n"
"\tinlen=%d, outlen=%d\n",
trace_id, in_len, out_len);
goto l_out;
}
return 0;
l_out:
return ret;
}
static s32 sxe_do_cli_cmd(struct sxe_hw *hw, unsigned long arg)
{
s32 ret = -SXE_FAILED;
u8 *in_data;
u16 in_len;
u8 *out_data;
u16 out_len;
u64 trace_id;
struct sxe_driver_cmd cmd;
struct sxe_adapter *adapter = hw->adapter;
struct sxeioctlsynccmd __user *user_cmd =
(struct sxeioctlsynccmd __user *)arg;
struct sxeioctlsynccmd *user_cmd_buf =
kzalloc(sizeof(struct sxeioctlsynccmd), GFP_KERNEL);
if (!user_cmd_buf) {
LOG_ERROR_BDF("kzalloc user_cmd_buf mem failed\n");
ret = -ENOMEM;
goto l_ret;
}
if (copy_from_user(user_cmd_buf, (void __user *)user_cmd,
sizeof(struct sxeioctlsynccmd))) {
LOG_ERROR_BDF("hw[%p] , copy from user err\n", hw);
ret = -EFAULT;
goto l_free;
}
in_data = user_cmd_buf->indata;
in_len = user_cmd_buf->inlen;
out_data = user_cmd_buf->outdata;
out_len = user_cmd_buf->outlen;
trace_id = user_cmd_buf->traceid;
LOG_DEBUG_BDF("get user cmd: trace_id=0x%llx,\n"
"\tin_data len=%u, out_data len=%u\n",
trace_id, in_len, out_len);
ret = sxe_cli_user_input_param_check(trace_id, in_data, in_len,
out_data, out_len);
if (ret)
goto l_free;
cmd.req = in_data;
cmd.req_len = in_len;
cmd.resp = out_data;
cmd.resp_len = out_len;
cmd.trace_id = trace_id;
cmd.opcode = SXE_CMD_MAX;
cmd.is_interruptible = true;
ret = sxe_cli_cmd_trans(hw, &cmd);
if (ret) {
LOG_ERROR_BDF("sxe cli cmd trace_id=0x%llx\n"
"\ttrans error, ret=%d\n",
trace_id, ret);
goto l_free;
}
l_free:
kfree(user_cmd_buf);
user_cmd_buf = NULL;
l_ret:
return ret;
}
static long sxe_cli_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
long ret = -ENOTTY;
struct sxe_hw *hw;
struct sxe_adapter *adapter;
if (!filep || cmd == 0 || arg == 0) {
LOG_ERROR("filep=%p cmd=%d arg=%ld\n", filep, cmd, arg);
ret = -EINVAL;
goto l_ioctl_failed;
}
adapter = (struct sxe_adapter *)filep->private_data;
LOG_DEBUG_BDF("driver ioctl cmd=%x, arg=0x%lx\n", cmd, arg);
if (adapter) {
switch (cmd) {
case SXE_CMD_IOCTL_SYNC_CMD:
hw = &adapter->hw;
ret = sxe_do_cli_cmd(hw, arg);
break;
default:
LOG_ERROR_BDF("unknown ioctl cmd, filep=%p, cmd=%d,\n"
"\targ=0x%8.8lx\n",
filep, cmd, arg);
break;
}
} else {
LOG_WARN_BDF("can found cdev\n");
ret = -ENODEV;
goto l_ioctl_failed;
}
if (ret) {
LOG_ERROR_BDF("filp=%p, cmd=%d, arg=%lx, ret=%ld\n", filep, cmd,
arg, ret);
goto l_ioctl_failed;
}
return SXE_SUCCESS;
l_ioctl_failed:
return ret;
}
const struct file_operations sxe_cdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = sxe_cli_ioctl,
.open = sxe_cli_open,
.release = NULL,
};
static void sxe_pci_addr_get(struct pci_dev *pci_dev,
struct sxe_pci_addr *pci_addr)
{
pci_addr->domain = pci_domain_nr(pci_dev->bus);
pci_addr->bus = pci_dev->bus->number;
pci_addr->deviceno =
(((pci_dev->devfn) >> PCI_BDF_DEV_SHIFT) & PCI_BDF_DEV_MASK);
pci_addr->devfn = ((pci_dev->devfn) & PCI_BDF_FUNC_MASK);
}
s32 sxe_cli_cdev_register(void)
{
s32 ret;
ret = alloc_chrdev_region(&sxe_cdev_major, 0, SXE_MAX_DEVICES_NUM,
SXE_CHRDEV_NAME);
if (ret) {
LOG_ERROR("alloc cdev number failed\n");
goto l_alloc_cdev_failed;
}
#ifdef CLASS_CREATE_NEED_1_PARAM
sxe_cdev_class = class_create(SXE_CHRDEV_CLASS_NAME);
#else
sxe_cdev_class = class_create(THIS_MODULE, SXE_CHRDEV_CLASS_NAME);
#endif
if (IS_ERR(sxe_cdev_class)) {
ret = PTR_ERR(sxe_cdev_class);
LOG_ERROR("create cdev class failed\n");
goto l_create_class_failed;
}
mutex_init(&sxe_minor_lock);
return SXE_SUCCESS;
l_create_class_failed:
unregister_chrdev_region(sxe_cdev_major, SXE_MAX_DEVICES_NUM);
l_alloc_cdev_failed:
return ret;
}
void sxe_cli_cdev_unregister(void)
{
class_destroy(sxe_cdev_class);
unregister_chrdev_region(sxe_cdev_major, SXE_MAX_DEVICES_NUM);
idr_destroy(&sxe_minor_idr);
}
static s32 sxe_get_minor(s32 *dev_minor)
{
s32 ret = -ENOMEM;
mutex_lock(&sxe_minor_lock);
ret = idr_alloc(&sxe_minor_idr, NULL, 0, SXE_MAX_DEVICES_NUM,
GFP_KERNEL);
if (ret >= 0) {
*dev_minor = ret;
ret = 0;
}
mutex_unlock(&sxe_minor_lock);
return ret;
}
static void sxe_free_minor(s32 dev_minor)
{
mutex_lock(&sxe_minor_lock);
idr_remove(&sxe_minor_idr, dev_minor);
mutex_unlock(&sxe_minor_lock);
}
s32 sxe_cli_cdev_create(struct sxe_adapter *adapter)
{
s32 ret;
s32 dev_major, dev_minor;
struct sxe_pci_addr pci_addr;
ret = sxe_get_minor(&dev_minor);
if (ret) {
LOG_ERROR("cdev minor get failed, ret=%d\n", ret);
ret = -ENOMEM;
goto l_get_minor_failed;
}
dev_major = MAJOR(sxe_cdev_major);
adapter->cdev_info.dev_no = MKDEV(dev_major, dev_minor);
cdev_init(&adapter->cdev_info.cdev, &sxe_cdev_fops);
adapter->cdev_info.cdev.owner = THIS_MODULE;
adapter->cdev_info.cdev.ops = &sxe_cdev_fops;
ret = cdev_add(&adapter->cdev_info.cdev, adapter->cdev_info.dev_no, 1);
if (ret) {
LOG_ERROR_BDF("failed to add cdev dev_no=%ld\n",
(unsigned long)adapter->cdev_info.dev_no);
goto l_add_cdev_failed;
}
sxe_pci_addr_get(adapter->pdev, &pci_addr);
adapter->cdev_info.device =
device_create(sxe_cdev_class, NULL, adapter->cdev_info.dev_no,
NULL, SXE_CHRDEV_NAME "-%04x:%02x:%02x.%x",
pci_addr.domain, pci_addr.bus, pci_addr.deviceno,
pci_addr.devfn);
if (IS_ERR(adapter->cdev_info.device)) {
ret = PTR_ERR(adapter->cdev_info.device);
LOG_ERROR_BDF("failed to create device, dev_no=%ld\n",
(unsigned long)adapter->cdev_info.dev_no);
goto l_create_dev_failed;
}
LOG_INFO("create char dev[%p] dev_no[major:minor=%u:%u] on pci_dev[%p]\n"
"\tto net_dev[%p] belongs to class dev[%p] success\n",
&adapter->cdev_info.cdev, dev_major, dev_minor, adapter->pdev,
adapter->netdev, adapter->cdev_info.device);
return SXE_SUCCESS;
l_create_dev_failed:
cdev_del(&adapter->cdev_info.cdev);
l_add_cdev_failed:
sxe_free_minor(dev_minor);
l_get_minor_failed:
return ret;
}
void sxe_cli_cdev_delete(struct sxe_adapter *adapter)
{
s32 dev_minor;
dev_minor = MINOR(adapter->cdev_info.dev_no);
sxe_free_minor(dev_minor);
LOG_INFO("delete char dev[%p], dev_no[major:minor=%u:%u]\n",
&adapter->cdev_info.cdev, MAJOR(adapter->cdev_info.dev_no),
dev_minor);
device_destroy(sxe_cdev_class, adapter->cdev_info.dev_no);
cdev_del(&adapter->cdev_info.cdev);
}

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_host_cli.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_HOST_CLI_H__
#define __SXE_HOST_CLI_H__
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include "sxe.h"
#include "sxe_cli.h"
#include "sxe_msg.h"
#include "drv_msg.h"
#define SXE_CHRDEV_NAME "sxe-cli"
#define SXE_MAX_DEVICES_NUM BIT(MINORBITS)
#define SXE_CHRDEV_CLASS_NAME SXE_CHRDEV_NAME
struct sxe_pci_addr {
s32 domain;
u8 bus;
u32 deviceno;
u32 devfn;
};
s32 sxe_cli_cdev_register(void);
void sxe_cli_cdev_unregister(void);
s32 sxe_cli_cdev_create(struct sxe_adapter *adapter);
void sxe_cli_cdev_delete(struct sxe_adapter *adapter);
#endif

View File

@ -0,0 +1,971 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_host_hdc.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include "sxe.h"
#ifndef NO_NEED_SIGNAL_H
#include <linux/sched/signal.h>
#endif
#include "sxe_host_hdc.h"
#include "sxe_log.h"
#include "sxe_hw.h"
#include "sxe_msg.h"
#include "drv_msg.h"
static atomic_t hdc_available = ATOMIC_INIT(1);
static DEFINE_PER_CPU(union sxe_trace_info, sxe_trace_id);
#define TRACE_ID_CHIP_OUT_COUNT_MASK 0x000FFFFFFFFFFFFFLLU
#define TRACE_ID_CHIP_OUT_CPUID_MASK 0x7FFLLU
#define SXE_HDC_RETRY_CNT (250)
#define SXE_HDC_RETRY_ITR (10)
#define NS_TO_MS_UNIT (1000000)
#ifdef DEFINE_SEMAPHORE_NEED_CNT
DEFINE_SEMAPHORE(g_hdc_sema, 1);
#else
DEFINE_SEMAPHORE(g_hdc_sema);
#endif
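/* Build a trace id from per-CPU state: an 11-bit CPU number plus a 53-bit
 * wrapping per-CPU counter, giving unique ids without cross-CPU locking.
 */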
static void sxe_trace_id_alloc(u64 *trace_id)
{
union sxe_trace_info *id = NULL;
u64 trace_id_count = 0;
preempt_disable();
id = this_cpu_ptr(&sxe_trace_id);
trace_id_count = id->sxe_trace_id_param.count;
++trace_id_count;
id->sxe_trace_id_param.count =
(trace_id_count & TRACE_ID_CHIP_OUT_COUNT_MASK);
*trace_id = id->trace_id;
preempt_enable();
}
static void sxe_trace_id_init(void)
{
s32 cpu = 0;
union sxe_trace_info *id = NULL;
for_each_possible_cpu(cpu) {
id = &per_cpu(sxe_trace_id, cpu);
id->sxe_trace_id_param.cpu_id =
(cpu & TRACE_ID_CHIP_OUT_CPUID_MASK);
id->sxe_trace_id_param.count = 0;
}
}
static s32 sxe_cli_fw_time_sync(struct sxe_adapter *adapter)
{
s32 ret;
struct sxe_driver_cmd cmd;
u64 timestamp = ktime_get_real_ns();
struct sxe_hw *hw = &adapter->hw;
timestamp = timestamp / NS_TO_MS_UNIT;
LOG_INFO_BDF("sync time= %llu ms\n", timestamp);
cmd.req = &timestamp;
cmd.req_len = sizeof(timestamp);
cmd.resp = NULL;
cmd.resp_len = 0;
cmd.trace_id = 0;
cmd.opcode = SXE_CMD_TINE_SYNC;
cmd.is_interruptible = true;
ret = sxe_driver_cmd_trans(hw, &cmd);
if (ret) {
LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync,\n"
"\tfailed count=%u\n",
ret, adapter->hdc_ctxt.time_sync_failed);
adapter->hdc_ctxt.time_sync_failed++;
}
return ret;
}
s32 sxe_host_to_fw_time_sync(struct sxe_adapter *adapter)
{
s32 ret = 0;
s32 ret_v;
u32 status;
struct sxe_hw *hw = &adapter->hw;
status = hw->hdc.ops->fw_status_get(hw);
if (status != SXE_FW_START_STATE_FINISHED) {
LOG_ERROR_BDF("fw[%p] status[0x%x] is not good,\n"
"\tand time_sync_failed=%u\n",
hw, status, adapter->hdc_ctxt.time_sync_failed);
adapter->hdc_ctxt.time_sync_failed++;
ret = -SXE_FW_STATUS_ERR;
goto l_ret;
}
ret_v = sxe_cli_fw_time_sync(adapter);
if (ret_v) {
LOG_WARN_BDF("fw time sync failed, ret_v=%d\n", ret_v);
goto l_ret;
}
l_ret:
return ret;
}
void sxe_time_sync_handler(struct work_struct *work)
{
s32 ret;
struct sxe_adapter *adapter =
container_of(work, struct sxe_adapter, hdc_ctxt.time_sync_work);
ret = sxe_host_to_fw_time_sync(adapter);
if (ret)
LOG_ERROR_BDF("time sync handler err, ret=%d\n", ret);
}
struct semaphore *sxe_hdc_sema_get(void)
{
return &g_hdc_sema;
}
void sxe_hdc_available_set(s32 value)
{
atomic_set(&hdc_available, value);
}
void sxe_hdc_channel_init(struct sxe_hdc_context *hdc_ctxt)
{
sxe_trace_id_init();
init_completion(&hdc_ctxt->sync_done);
INIT_WORK(&hdc_ctxt->time_sync_work, sxe_time_sync_handler);
sxe_hdc_available_set(1);
hdc_ctxt->time_sync_failed = 0;
}
void sxe_hdc_channel_destroy(struct sxe_hw *hw)
{
sxe_hdc_available_set(0);
hw->hdc.ops->resource_clean(hw);
}
static inline s32 sxe_hdc_lock_get(struct sxe_hw *hw)
{
s32 ret = SXE_HDC_FALSE;
struct sxe_adapter *adapter = hw->adapter;
if (atomic_read(&hdc_available))
ret = hw->hdc.ops->pf_lock_get(hw, SXE_HDC_TRYLOCK_MAX);
else
LOG_ERROR_BDF("hdc channel not available\n");
return ret;
}
static inline void sxe_hdc_lock_release(struct sxe_hw *hw)
{
hw->hdc.ops->pf_lock_release(hw, SXE_HDC_RELEASELOCK_MAX);
}
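/* Poll for the firmware-over (fw_ov) flag, sleeping SXE_HDC_RETRY_ITR ms
 * between reads; in interruptible mode a pending signal aborts the poll with
 * -EINTR, and exhausting the timeout yields -SXE_ERR_HDC_FW_OV_TIMEOUT.
 */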
static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout,
bool is_interruptible)
{
s32 ret = 0;
u32 i;
bool fw_ov = false;
struct sxe_adapter *adapter = hw->adapter;
if (atomic_read(&hdc_available)) {
for (i = 0; i < timeout; i++) {
fw_ov = hw->hdc.ops->is_fw_over_set(hw);
if (fw_ov)
break;
if (is_interruptible) {
if (msleep_interruptible(SXE_HDC_RETRY_ITR)) {
ret = -EINTR;
LOG_DEV_INFO("interrupted, exit polling\n");
goto l_ret;
}
} else {
msleep(SXE_HDC_RETRY_ITR);
}
}
if (i >= timeout) {
LOG_ERROR_BDF("poll fw_ov timeout...\n");
ret = -SXE_ERR_HDC_FW_OV_TIMEOUT;
goto l_ret;
}
hw->hdc.ops->fw_ov_clear(hw);
ret = 0;
} else {
ret = SXE_HDC_FALSE;
LOG_ERROR_BDF("hdc channel not available\n");
}
l_ret:
return ret;
}
#ifdef SXE_NEED_PROCESS_CANCEL
static inline bool is_interrupt_signal(struct task_struct *task)
{
bool is_inter = false;
if (sigismember(&task->pending.signal, SIGINT) ||
sigismember(&task->pending.signal, SIGKILL) ||
sigismember(&task->pending.signal, SIGQUIT)) {
is_inter = true;
goto l_ret;
}
l_ret:
return is_inter;
}
#endif
void sxe_hdc_irq_handler(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
u32 irq_event = hw->hdc.ops->irq_event_get(hw);
hw->irq.ops->specific_irq_disable(hw, SXE_EIMC_HDC);
LOG_DEBUG_BDF("hdc irq interrupt coming\n");
if (irq_event & MSI_EVT_HDC_FWOV) {
LOG_DEBUG_BDF("hdc fw over event occur\n");
hw->hdc.ops->irq_event_clear(hw, MSI_EVT_HDC_FWOV);
hw->hdc.ops->fw_ov_clear(hw);
complete(&adapter->hdc_ctxt.sync_done);
}
if (irq_event & MSI_EVT_HDC_TIME_SYNC) {
LOG_DEBUG_BDF("hdc fw sync time event occur\n");
hw->hdc.ops->irq_event_clear(hw, MSI_EVT_HDC_TIME_SYNC);
schedule_work(&adapter->hdc_ctxt.time_sync_work);
}
}
static s32 sxe_wait_fw_ack(struct sxe_hw *hw, u64 trace_id)
{
s32 ret;
struct sxe_adapter *adapter = container_of(hw, struct sxe_adapter, hw);
while (1) {
ret = wait_for_completion_interruptible(&adapter->hdc_ctxt.sync_done);
if (ret == 0) {
LOG_DEBUG_BDF("cmd trace=0x%llx,\n"
"\twait_for_completion_interrupted success\n",
trace_id);
break;
}
ret = signal_pending(current);
if (!ret) {
LOG_DEBUG_BDF("cmd trace=0x%llx, no pending signal,\n"
"\tcontinue wait",
trace_id);
continue;
} else {
LOG_DEBUG_BDF("cmd trace=0x%llx got signal, default quit\n",
trace_id);
ret = -EINTR;
break;
}
#ifdef SXE_NEED_PROCESS_CANCEL
ret = is_interrupt_signal(current);
if (ret) {
LOG_DEBUG_BDF("cmd trace=0x%llx interrupted, need cancel\n",
trace_id);
ret = -EINTR;
break;
}
LOG_DEBUG_BDF("cmd trace=0x%llx got other signal, ignore\n",
trace_id);
#endif
}
return ret;
}
static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id,
union hdcheader *pkt_header, bool use_msi,
bool is_interruptible)
{
s32 ret = 0;
u32 timeout = SXE_HDC_WAIT_TIME;
struct sxe_adapter *adapter = hw->adapter;
pkt_header->dw0 = 0;
pkt_header->head.errcode = PKG_ERR_OTHER;
LOG_INFO_BDF("trace_id=0x%llx hdc cmd ack get start, mode=%s\n",
trace_id, use_msi ? "msi inter" : "polling");
if (use_msi)
ret = sxe_wait_fw_ack(hw, trace_id);
else
ret = sxe_poll_fw_ack(hw, timeout, is_interruptible);
if (ret) {
LOG_ERROR_BDF("get fw ack failed, mode=%s ret=%d\n",
use_msi ? "msi inter" : "polling", ret);
goto l_out;
}
pkt_header->dw0 = hw->hdc.ops->fw_ack_header_rcv(hw);
if (pkt_header->head.errcode == PKG_ERR_PKG_SKIP) {
ret = -SXE_HDC_PKG_SKIP_ERR;
goto l_out;
} else if (pkt_header->head.errcode != PKG_OK) {
ret = -SXE_HDC_PKG_OTHER_ERR;
goto l_out;
}
l_out:
LOG_INFO_BDF("trace_id=0x%llx hdc cmd ack get end ret=%d\n", trace_id,
ret);
return ret;
}
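/* Fill one HDC packet header: totallen carries the whole message length in
 * dwords, len this packet's dword count (DWORD_NUM for full packets, the
 * remainder for the last), and start/end flags mark the first write packet
 * and the final packet of the message.
 */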
static void hdc_packet_header_fill(union hdcheader *pkt_header, u8 pkt_index,
u16 total_len, u16 pkt_num, u8 is_read,
bool use_msi)
{
u16 pkt_len = 0;
pkt_header->dw0 = 0;
pkt_header->head.pid = (is_read == 0) ? pkt_index : (pkt_index - 1);
pkt_header->head.totallen = SXE_HDC_LEN_TO_REG(total_len);
if (pkt_index == 0 && is_read == 0)
pkt_header->head.startpkg = SXE_HDC_BIT_1;
if (pkt_index == (pkt_num - 1)) {
pkt_header->head.endpkg = SXE_HDC_BIT_1;
pkt_len = total_len - (DWORD_NUM * (pkt_num - 1));
} else {
pkt_len = DWORD_NUM;
}
pkt_header->head.len = SXE_HDC_LEN_TO_REG(pkt_len);
pkt_header->head.isrd = is_read;
if (use_msi)
pkt_header->head.msi = 1;
}
static inline void hdc_channel_clear(struct sxe_hw *hw)
{
hw->hdc.ops->fw_ov_clear(hw);
}
static inline void hdc_packet_send_done(struct sxe_hw *hw)
{
hw->hdc.ops->packet_send_done(hw);
}
static inline void hdc_packet_header_send(struct sxe_hw *hw, u32 header)
{
hw->hdc.ops->packet_header_send(hw, header);
}
static inline void hdc_packet_data_dword_send(struct sxe_hw *hw,
u16 dword_index, u32 value)
{
hw->hdc.ops->packet_data_dword_send(hw, dword_index, value);
}
static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
union hdcheader *pkt_header, u8 *data, u16 data_len)
{
u16 dw_idx = 0;
u16 pkt_len = 0;
u16 offset = 0;
u32 pkg_data = 0;
struct sxe_adapter *adapter = hw->adapter;
LOG_DEBUG_BDF("hw_addr[%p] trace_id=0x%llx send pkt pkg_header[0x%x],\n"
"\tdata_addr[%p], data_len[%u]\n",
hw, trace_id, pkt_header->dw0, data, data_len);
hdc_packet_header_send(hw, pkt_header->dw0);
if (!data || data_len == 0)
goto l_send_done;
pkt_len = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
for (dw_idx = 0; dw_idx < pkt_len; dw_idx++) {
pkg_data = 0;
offset = dw_idx * BYTE_PER_DWORD;
if (pkt_header->head.endpkg == SXE_HDC_BIT_1 &&
dw_idx == (pkt_len - 1) &&
data_len % BYTE_PER_DWORD != 0)
memcpy((u8 *)&pkg_data, data + offset,
data_len % BYTE_PER_DWORD);
else
pkg_data = *(u32 *)(data + offset);
LOG_DEBUG_BDF("trace_id=0x%llx send data to reg[%u] dword[0x%x]\n",
trace_id, dw_idx, pkg_data);
hdc_packet_data_dword_send(hw, dw_idx, pkg_data);
}
l_send_done:
hdc_channel_clear(hw);
hdc_packet_send_done(hw);
}
static inline u32 hdc_packet_data_dword_rcv(struct sxe_hw *hw, u16 dword_index)
{
return hw->hdc.ops->packet_data_dword_rcv(hw, dword_index);
}
static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id,
union hdcheader *pkt_header, u8 *out_data,
u16 out_len)
{
u16 dw_idx = 0;
u16 dw_num = 0;
u16 offset = 0;
u32 pkt_data;
struct sxe_adapter *adapter = hw->adapter;
dw_num = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
for (dw_idx = 0; dw_idx < dw_num; dw_idx++) {
pkt_data = hdc_packet_data_dword_rcv(hw, dw_idx);
offset = dw_idx * BYTE_PER_DWORD;
LOG_DEBUG_BDF("trace_id=0x%llx get data from reg[%u] dword=0x%x\n",
trace_id, dw_idx, pkt_data);
if (pkt_header->head.endpkg == SXE_HDC_BIT_1 &&
dw_idx == (dw_num - 1) &&
out_len % BYTE_PER_DWORD != 0)
memcpy(out_data + offset, (u8 *)&pkt_data,
out_len % BYTE_PER_DWORD);
else
*(u32 *)(out_data + offset) = pkt_data;
}
}
static s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id, u8 *in_data,
u16 in_len, bool use_msi, bool is_interruptible)
{
s32 ret = 0;
u32 total_len = 0;
u16 pkt_num = 0;
u16 index = 0;
u16 offset = 0;
union hdcheader pkt_header;
bool is_retry = false;
struct sxe_adapter *adapter = hw->adapter;
total_len = (in_len + BYTE_PER_DWORD - 1) / BYTE_PER_DWORD;
pkt_num = (in_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
LOG_DEBUG_BDF("hw[%p] trace_id=0x%llx req in_data[%p] in_len=%u,\n"
"\ttotal_len=%uDWORD, pkt_num = %u, mode=%s\n",
hw, trace_id, in_data, in_len, total_len, pkt_num,
use_msi ? "msi" : "polling");
for (index = 0; index < pkt_num; index++) {
LOG_DEBUG_BDF("trace_id=0x%llx fill pkg header[%p], pkg_index[%u],\n"
"\ttotal_Len[%u], pkg_num[%u], is_read[no]\n",
trace_id, &pkt_header, index, total_len, pkt_num);
hdc_packet_header_fill(&pkt_header, index, total_len, pkt_num,
0, use_msi);
offset = index * DWORD_NUM * BYTE_PER_DWORD;
hdc_packet_send(hw, trace_id, &pkt_header, in_data + offset,
in_len);
if (index == pkt_num - 1)
break;
ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, use_msi,
is_interruptible);
if (ret == -EINTR) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n",
trace_id);
goto l_out;
} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req ack\n"
"\tfailed, retry\n",
trace_id);
if (is_retry) {
ret = -SXE_HDC_RETRY_ERR;
goto l_out;
}
index--;
is_retry = true;
continue;
} else if (ret != SXE_HDC_SUCCESS) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req ack\n"
"\tfailed, ret=%d\n",
trace_id, ret);
ret = -SXE_HDC_RETRY_ERR;
goto l_out;
}
LOG_DEBUG_BDF("hdc cmd trace_id=0x%llx get req packet_index[%u]\n"
"\tack succeed header[0x%x]\n",
trace_id, index, pkt_header.dw0);
is_retry = false;
}
l_out:
return ret;
}
static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id, u8 *out_data,
u16 out_len, bool use_msi, bool is_interruptible)
{
s32 ret;
u32 req_dwords;
u32 resp_len;
u32 resp_dwords;
u16 pkt_num;
u16 index;
u16 offset;
union hdcheader pkt_header;
bool retry = false;
struct sxe_adapter *adapter = hw->adapter;
LOG_INFO_BDF("hdc trace_id=0x%llx req's last cmd ack get, mode=%s\n",
trace_id, use_msi ? "msi" : "polling");
ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, use_msi,
is_interruptible);
if (ret == -EINTR) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n",
trace_id);
goto l_out;
} else if (ret) {
LOG_ERROR_BDF("hdc trace_id=0x%llx ack get failed, ret=%d\n",
trace_id, ret);
ret = -SXE_HDC_RETRY_ERR;
goto l_out;
}
LOG_INFO_BDF("hdc trace_id=0x%llx req's last cmd ack get\n"
"\tsucceed header[0x%x]\n",
trace_id, pkt_header.dw0);
if (!pkt_header.head.startpkg) {
ret = -SXE_HDC_RETRY_ERR;
LOG_ERROR_BDF("trace_id=0x%llx ack header has error:\n"
"\tnot set start bit\n",
trace_id);
goto l_out;
}
req_dwords = (out_len + BYTE_PER_DWORD - 1) / BYTE_PER_DWORD;
resp_dwords = SXE_HDC_LEN_FROM_REG(pkt_header.head.totallen);
if (resp_dwords > req_dwords) {
ret = -SXE_HDC_RETRY_ERR;
LOG_ERROR_BDF("trace_id=0x%llx rsv len check failed:\n"
"\tresp_dwords=%u, req_dwords=%u\n",
trace_id, resp_dwords, req_dwords);
goto l_out;
}
resp_len = resp_dwords << DWORD_TO_BYTE_SHIFT;
LOG_INFO_BDF("outlen = %u bytes, resp_len = %u bytes\n", out_len,
resp_len);
if (resp_len > out_len)
resp_len = out_len;
hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data, resp_len);
pkt_num = (resp_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
for (index = 1; index < pkt_num; index++) {
LOG_DEBUG_BDF("trace_id=0x%llx fill pkg header[%p], pkg_index[%u],\n"
"\ttotal_Len[%u], pkg_num[%u], is_read[yes] use_msi=%s\n",
trace_id, &pkt_header, index, resp_dwords, pkt_num,
use_msi ? "yes" : "no");
hdc_packet_header_fill(&pkt_header, index, resp_dwords, pkt_num,
1, use_msi);
hdc_packet_send(hw, trace_id, &pkt_header, NULL, 0);
ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, use_msi,
is_interruptible);
if (ret == -EINTR) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n",
trace_id);
goto l_out;
} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
LOG_ERROR_BDF("trace_id=0x%llx hdc resp ack polling\n"
"\tfailed, ret=%d\n",
trace_id, ret);
if (retry) {
ret = -SXE_HDC_RETRY_ERR;
goto l_out;
}
index--;
retry = true;
continue;
} else if (ret != SXE_HDC_SUCCESS) {
LOG_ERROR_BDF("trace_id=0x%llx hdc resp ack polling\n"
"\tfailed, ret=%d\n",
trace_id, ret);
ret = -SXE_HDC_RETRY_ERR;
goto l_out;
}
LOG_INFO_BDF("hdc trace_id=0x%llx resp pkt[%u] get\n"
"\tsucceed header[0x%x]\n",
trace_id, index, pkt_header.dw0);
retry = false;
offset = index * DWORD_NUM * BYTE_PER_DWORD;
hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data + offset,
resp_len);
}
l_out:
return ret;
}
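/* One request/response transaction over the HDC channel: verify firmware is
 * ready and the channel idle, take the PF-side hardware lock, push the
 * request packets, then collect the response; the lock is always released.
 */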
static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
struct sxe_hdc_trans_info *trans_info,
bool use_msi, bool is_interruptible)
{
s32 ret = SXE_SUCCESS;
u32 status;
struct sxe_adapter *adapter = hw->adapter;
u32 channel_state;
status = hw->hdc.ops->fw_status_get(hw);
if (status != SXE_FW_START_STATE_FINISHED) {
LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n", hw, status);
ret = -SXE_FW_STATUS_ERR;
goto l_ret;
}
channel_state = hw->hdc.ops->channel_state_get(hw);
if (channel_state != SXE_FW_HDC_TRANSACTION_IDLE) {
LOG_ERROR_BDF("hdc channel state is busy\n");
ret = -SXE_HDC_RETRY_ERR;
goto l_ret;
}
ret = sxe_hdc_lock_get(hw);
if (ret) {
LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx get hdc lock fail,\n"
"\tret=%d\n",
hw, trace_id, ret);
ret = -SXE_HDC_RETRY_ERR;
goto l_ret;
}
ret = hdc_req_process(hw, trace_id, trans_info->in.data,
trans_info->in.len, use_msi, is_interruptible);
if (ret) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req process\n"
"\tfailed, ret=%d\n",
trace_id, ret);
goto l_hdc_lock_release;
}
ret = hdc_resp_process(hw, trace_id, trans_info->out.data,
trans_info->out.len, use_msi, is_interruptible);
if (ret) {
LOG_ERROR_BDF("hdc cmd trace_id=0x%llx resp process\n"
"\tfailed, ret=%d\n",
trace_id, ret);
}
l_hdc_lock_release:
sxe_hdc_lock_release(hw);
l_ret:
return ret;
}
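/* Serialize HDC commands behind a semaphore and retry one transaction up to
 * SXE_HDC_RETRY_CNT times on -SXE_HDC_RETRY_ERR, sleeping between attempts;
 * other failures (and retry exhaustion) are reported as -EFAULT.
 */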
static s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
struct sxe_hdc_trans_info *trans_info,
bool use_msi, bool is_interruptible)
{
s32 ret;
u8 retry_idx;
struct sxe_adapter *adapter = hw->adapter;
LOG_DEBUG_BDF("hw[%p] %s cmd trace=0x%llx get use sema = %p, count=%u\n",
hw, use_msi ? "driver" : "user", trace_id, sxe_hdc_sema_get(),
sxe_hdc_sema_get()->count);
if (is_interruptible) {
ret = down_interruptible(sxe_hdc_sema_get());
if (ret) {
ret = -EINTR;
LOG_WARN_BDF("hw[%p] hdc concurrency full\n", hw);
goto l_ret;
}
} else {
down(sxe_hdc_sema_get());
}
for (retry_idx = 0; retry_idx < SXE_HDC_RETRY_CNT; retry_idx++) {
ret = sxe_hdc_packet_trans(hw, trace_id, trans_info, use_msi,
is_interruptible);
if (ret == SXE_SUCCESS) {
goto l_up;
} else if (ret == -SXE_HDC_RETRY_ERR) {
if (is_interruptible) {
if (msleep_interruptible(SXE_HDC_RETRY_ITR)) {
ret = -EINTR;
LOG_ERROR_BDF("interrupted, exit polling\n");
goto l_up;
}
} else {
msleep(SXE_HDC_RETRY_ITR);
}
continue;
} else {
LOG_ERROR_BDF("sxe hdc packet trace_id=0x%llx\n"
"\ttrans error, ret=%d\n",
trace_id, ret);
ret = -EFAULT;
goto l_up;
}
}
l_up:
LOG_DEBUG_BDF("hw[%p] %s cmd trace=0x%llx up sema = %p, count=%u\n", hw,
use_msi ? "driver" : "user", trace_id, sxe_hdc_sema_get(),
sxe_hdc_sema_get()->count);
up(sxe_hdc_sema_get());
l_ret:
if (ret == -SXE_HDC_RETRY_ERR)
ret = -EFAULT;
return ret;
}
static void sxe_cmd_hdr_init(struct sxe_hdc_cmd_hdr *cmd_hdr, u8 cmd_type)
{
cmd_hdr->cmd_type = cmd_type;
cmd_hdr->cmd_sub_type = 0;
}
static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg, u16 opcode,
u64 trace_id, void *req_data, u16 req_len)
{
LOG_DEBUG("cmd[opcode=0x%x], trace=0x%llx, req_data_len=%u start init\n",
opcode, trace_id, req_len);
msg->opcode = opcode;
msg->length.req_len = SXE_HDC_MSG_HDR_SIZE + req_len;
msg->traceid = trace_id;
if (req_data && req_len != 0)
memcpy(msg->body, (u8 *)req_data, req_len);
}
static void sxe_hdc_trans_info_init(struct sxe_hdc_trans_info *trans_info,
u8 *in_data_buf, u16 in_len,
u8 *out_data_buf, u16 out_len)
{
trans_info->in.data = in_data_buf;
trans_info->in.len = in_len;
trans_info->out.data = out_data_buf;
trans_info->out.len = out_len;
}
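/* Driver-originated command path: the request buffer is laid out as
 * [sxe_hdc_cmd_hdr][sxe_hdc_drv_cmd_msg header][payload]; the ack error code
 * and length are validated before the response body is copied back.
 */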
s32 sxe_driver_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd)
{
s32 ret = SXE_SUCCESS;
struct sxe_hdc_cmd_hdr *cmd_hdr;
struct sxe_hdc_drv_cmd_msg *msg;
struct sxe_hdc_drv_cmd_msg *ack;
struct sxe_hdc_trans_info trans_info;
struct sxe_adapter *adapter = hw->adapter;
void *req_data = cmd->req, *resp_data = cmd->resp;
u16 opcode = cmd->opcode, req_len = cmd->req_len,
resp_len = cmd->resp_len;
u8 *in_data_buf;
u8 *out_data_buf;
u16 in_len;
u16 out_len;
u64 trace_id = 0;
u16 ack_data_len;
in_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + req_len;
out_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + resp_len;
sxe_trace_id_alloc(&trace_id);
in_data_buf = kzalloc(in_len, GFP_KERNEL);
if (!in_data_buf) {
LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc indata\n"
"\tmem len[%u] failed\n",
trace_id, in_len);
ret = -ENOMEM;
goto l_ret;
}
out_data_buf = kzalloc(out_len, GFP_KERNEL);
if (!out_data_buf) {
LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc out_data\n"
"\tmem len[%u] failed\n",
trace_id, out_len);
ret = -ENOMEM;
goto l_in_buf_free;
}
cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf;
sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_DRV);
msg = (struct sxe_hdc_drv_cmd_msg *)((u8 *)in_data_buf +
SXE_HDC_CMD_HDR_SIZE);
sxe_driver_cmd_msg_init(msg, opcode, trace_id, req_data, req_len);
LOG_DEBUG_BDF("trans drv cmd:trace_id=0x%llx, opcode[0x%x],\n"
"\tinlen=%u, out_len=%u\n",
trace_id, opcode, in_len, out_len);
sxe_hdc_trans_info_init(&trans_info, in_data_buf, in_len, out_data_buf,
out_len);
ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info, false,
cmd->is_interruptible);
if (ret) {
LOG_DEV_DEBUG("hdc cmd[0x%x] trace_id=0x%llx process\n"
"\tfailed, ret=%d\n",
opcode, trace_id, ret);
goto l_out_buf_free;
}
ack = (struct sxe_hdc_drv_cmd_msg *)((u8 *)out_data_buf +
SXE_HDC_CMD_HDR_SIZE);
if (ack->errcode) {
LOG_DEV_DEBUG("driver get hdc ack failed trace_id=0x%llx, err=%d\n",
trace_id, ack->errcode);
ret = -SXE_ERR_CLI_FAILED;
goto l_out_buf_free;
}
ack_data_len = ack->length.ack_len - SXE_HDC_MSG_HDR_SIZE;
if (resp_len != ack_data_len) {
LOG_DEV_DEBUG("ack trace_id=0x%llx data len[%u]\n"
"\tand resp_len[%u] dont match\n",
trace_id, ack_data_len, resp_len);
ret = -SXE_ERR_CLI_FAILED;
goto l_out_buf_free;
}
if (resp_len != 0)
memcpy(resp_data, ack->body, resp_len);
LOG_DEBUG_BDF("driver get hdc ack trace_id=0x%llx,\n"
"\tack_len=%u, ack_data_len=%u\n",
trace_id, ack->length.ack_len, ack_data_len);
l_out_buf_free:
kfree(out_data_buf);
l_in_buf_free:
kfree(in_data_buf);
l_ret:
return ret;
}
s32 sxe_cli_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd)
{
s32 ret = SXE_SUCCESS;
struct sxe_hdc_cmd_hdr *cmd_hdr;
struct sxe_hdc_trans_info trans_info;
struct sxe_adapter *adapter = hw->adapter;
u64 trace_id = cmd->trace_id;
u16 in_len = cmd->req_len, out_len = cmd->resp_len;
u8 *in_data = cmd->req;
u8 *out_data = cmd->resp;
u8 *in_data_buf;
u8 *out_data_buf;
u16 in_buf_len = in_len + SXE_HDC_CMD_HDR_SIZE;
u16 out_buf_len = out_len + SXE_HDC_CMD_HDR_SIZE;
in_data_buf = kzalloc(in_buf_len, GFP_KERNEL);
if (!in_data_buf) {
LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc indata\n"
"\tmem len[%u] failed\n",
trace_id, in_buf_len);
ret = -ENOMEM;
goto l_ret;
}
out_data_buf = kzalloc(out_buf_len, GFP_KERNEL);
if (!out_data_buf) {
LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc out_data\n"
"\tmem len[%u] failed\n",
trace_id, out_buf_len);
ret = -ENOMEM;
goto l_in_buf_free;
}
if (copy_from_user(in_data_buf + SXE_HDC_CMD_HDR_SIZE,
(void __user *)in_data, in_len)) {
LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx copy from user err\n",
hw, trace_id);
ret = -EFAULT;
goto l_out_buf_free;
}
cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf;
sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_CLI);
LOG_DEBUG_BDF("trans cli cmd:trace_id=0x%llx,, inlen=%u, out_len=%u\n",
trace_id, in_len, out_len);
sxe_hdc_trans_info_init(&trans_info, in_data_buf, in_buf_len,
out_data_buf, out_buf_len);
ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info, false,
cmd->is_interruptible);
if (ret) {
LOG_DEV_DEBUG("hdc cmd trace_id=0x%llx hdc packet trans\n"
"\tfailed, ret=%d\n",
trace_id, ret);
goto l_out_buf_free;
}
if (copy_to_user((void __user *)out_data,
out_data_buf + SXE_HDC_CMD_HDR_SIZE, out_len)) {
LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx copy to user err\n",
hw, trace_id);
ret = -EFAULT;
}
l_out_buf_free:
kfree(out_data_buf);
l_in_buf_free:
kfree(in_data_buf);
l_ret:
return ret;
}

View File

@ -0,0 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_host_hdc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_HOST_HDC_H__
#define __SXE_HOST_HDC_H__
#include "sxe_hdc.h"
#include "sxe_hw.h"
#include "sxe.h"
#define SXE_SUCCESS (0)
#define SXE_FAILED (512)
#define SXE_HDC_SUCCESS 0
#define SXE_HDC_FALSE SXE_ERR_HDC(1)
#define SXE_HDC_INVAL_PARAM SXE_ERR_HDC(2)
#define SXE_HDC_BUSY SXE_ERR_HDC(3)
#define SXE_HDC_FW_OPS_FAILED SXE_ERR_HDC(4)
#define SXE_HDC_FW_OV_TIMEOUT SXE_ERR_HDC(5)
#define SXE_HDC_REQ_ACK_HEAD_ERR SXE_ERR_HDC(6)
#define SXE_HDC_REQ_ACK_TLEN_ERR SXE_ERR_HDC(7)
#define SXE_HDC_PKG_SKIP_ERR SXE_ERR_HDC(8)
#define SXE_HDC_PKG_OTHER_ERR SXE_ERR_HDC(9)
#define SXE_HDC_RETRY_ERR SXE_ERR_HDC(10)
#define SXE_FW_STATUS_ERR SXE_ERR_HDC(11)
#define SXE_HDC_TRYLOCK_MAX 200
#define SXE_HDC_RELEASELOCK_MAX 20
#define SXE_HDC_TEST_POLL_LOCK_MAX 10
#define SXE_HDC_WAIT_TIME 200
#define SXE_HDC_BIT_1 0x1
#define BYTE_PER_DWORD (4)
#define DWORD_TO_BYTE_SHIFT (2)
union sxe_trace_info {
u64 trace_id;
struct {
u64 count : 53;
u64 cpu_id : 11;
} sxe_trace_id_param;
};
struct sxe_hdc_data_info {
u8 *data;
u16 len;
};
struct sxe_hdc_trans_info {
struct sxe_hdc_data_info in;
struct sxe_hdc_data_info out;
};
struct sxe_driver_cmd {
void *req;
void *resp;
u64 trace_id;
bool is_interruptible;
u16 opcode;
u16 req_len;
u16 resp_len;
};
s32 sxe_driver_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd);
s32 sxe_cli_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd);
void sxe_hdc_channel_init(struct sxe_hdc_context *hdc_ctxt);
struct semaphore *sxe_hdc_sema_get(void);
void sxe_hdc_irq_handler(struct sxe_adapter *adapter);
s32 sxe_host_to_fw_time_sync(struct sxe_adapter *adapter);
void sxe_hdc_channel_destroy(struct sxe_hw *hw);
void sxe_hdc_available_set(s32 value);
void sxe_time_sync_handler(struct work_struct *work);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ipsec.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_IPSEC_H__
#define __SXE_IPSEC_H__
#include "sxe_ring.h"
#ifdef CONFIG_SXE_FPGA_SINGLE_PORT
#undef SXE_IPSEC_MAX_SA_COUNT
#undef SXE_IPSEC_MAX_RX_IP_COUNT
#undef SXE_IPSEC_BASE_TX_INDEX
#define SXE_IPSEC_MAX_SA_COUNT 24
#define SXE_IPSEC_MAX_RX_IP_COUNT 8
#define SXE_IPSEC_BASE_TX_INDEX SXE_IPSEC_MAX_SA_COUNT
#endif
#define SXE_IPSEC_SA_CNT_MAX (1024)
#define SXE_IPSEC_IP_CNT_MAX (128)
#define SXE_IPSEC_RX_INDEX_BASE (0)
#define SXE_IPSEC_TX_INDEX_BASE (SXE_IPSEC_SA_CNT_MAX)
#define SXE_IPSEC_AUTH_BIT_LEN (128)
#define SXE_IPSEC_SA_ENTRY_USED (0x1)
#define SXE_IPSEC_IP_ENTRY_USED (0x1)
#define SXE_IPSEC_IP_LEN (4)
#define SXE_IPSEC_KEY_LEN (4)
#define SXE_IPSEC_KEY_SALT_BIT_LEN (160)
#define SXE_IPSEC_KEY_BIT_LEN (128)
#define SXE_IPSEC_KEY_SALT_BYTE_LEN (SXE_IPSEC_KEY_SALT_BIT_LEN / 8)
#define SXE_IPSEC_KEY_BYTE_LEN (SXE_IPSEC_KEY_BIT_LEN / 8)
#define SXE_IPSEC_PADLEN_OFFSET (SXE_IPSEC_KEY_BYTE_LEN + 2)
#define SXE_IPV4_ADDR_SIZE (4)
#define SXE_IPV6_ADDR_SIZE (16)
#define SXE_IPSEC_RXMOD_VALID 0x00000001
#define SXE_IPSEC_RXMOD_PROTO_ESP 0x00000004
#define SXE_IPSEC_RXMOD_DECRYPT 0x00000008
#define SXE_IPSEC_RXMOD_IPV6 0x00000010
#define SXE_IPSEC_RXTXMOD_VF 0x00000020
struct sxe_tx_sa {
struct xfrm_state *xs;
u32 key[SXE_IPSEC_KEY_LEN];
u32 salt;
u32 mode;
bool encrypt;
u16 vf_idx;
unsigned long status;
};
struct sxe_rx_sa {
struct hlist_node hlist;
struct xfrm_state *xs;
u32 key[SXE_IPSEC_KEY_LEN];
u32 salt;
__be32 ip_addr[SXE_IPSEC_IP_LEN];
u32 mode;
u8 ip_idx;
u16 vf_idx;
bool decrypt;
unsigned long status;
};
struct sxe_rx_ip {
__be32 ip_addr[SXE_IPSEC_IP_LEN];
u16 ref_cnt;
unsigned long status;
};
struct sxe_ipsec_context {
u16 rx_sa_cnt;
u16 tx_sa_cnt;
atomic64_t rx_ipsec;
struct sxe_rx_ip *ip_table;
struct sxe_rx_sa *rx_table;
struct sxe_tx_sa *tx_table;
DECLARE_HASHTABLE(rx_table_list, 10);
};
s32 sxe_tx_ipsec_offload(struct sxe_ring *tx_ring, struct sxe_tx_buffer *first,
struct sxe_tx_context_desc *ctxt_desc);
void sxe_rx_ipsec_proc(struct sxe_ring *tx_ring, union sxe_rx_data_desc *desc,
struct sk_buff *skb);
void sxe_ipsec_offload_init(struct sxe_adapter *adapter);
void sxe_ipsec_table_restore(struct sxe_adapter *adapter);
void sxe_ipsec_offload_exit(struct sxe_adapter *adapter);
s32 sxe_vf_ipsec_add(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx);
s32 sxe_vf_ipsec_del(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx);
void sxe_vf_ipsec_entry_clear(struct sxe_adapter *adapter, u32 vf_idx);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,138 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_irq.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_IRQ_H__
#define __SXE_IRQ_H__
#include "sxe_ring.h"
struct sxe_adapter;
struct ethtool_coalesce;
#define SXE_MSIX_IRQ_MAX_NUM (64)
#define SXE_EVENT_IRQ_NUM (1)
#define SXE_RING_IRQ_MIN_NUM (1)
#define SXE_RING_IRQ_MAX_NUM (SXE_MSIX_IRQ_MAX_NUM)
#define SXE_MSIX_IRQ_MIN_NUM (SXE_EVENT_IRQ_NUM + SXE_RING_IRQ_MIN_NUM)
#define SXE_PCIE_MSIX_CAPS_OFFSET (0xB2)
#define SXE_PCIE_MSIX_ENTRY_MASK (0x7FF)
#define SXE_NAPI_WEIGHT (64)
#define SXE_IRQ_ITR_INC_MIN (2)
#define SXE_IRQ_ITR_MIN (10)
#define SXE_IRQ_ITR_MAX (126)
#define SXE_IRQ_ITR_LATENCY (0x80)
#define SXE_IRQ_ITR_BULK (0x00)
#define SXE_IRQ_ITR_MASK (0x00000FF8)
#define SXE_IRQ_BULK (0)
#define SXE_IRQ_ITR_12K (336)
#define SXE_IRQ_ITR_20K (200)
#define SXE_IRQ_ITR_100K (40)
#define SXE_IRQ_LRO_ITR_MIN (24)
#define SXE_IRQ_ITR_CONSTANT_MODE_VALUE (1)
#define SXE_IRQ_ITR_PKT_4 4
#define SXE_IRQ_ITR_PKT_48 48
#define SXE_IRQ_ITR_PKT_96 96
#define SXE_IRQ_ITR_PKT_256 256
#define SXE_IRQ_ITR_BYTES_9000 9000
enum sxe_irq_mode {
SXE_IRQ_MSIX_MODE = 0,
SXE_IRQ_MSI_MODE,
SXE_IRQ_INTX_MODE,
};
struct sxe_irq_rate {
unsigned long next_update;
unsigned int total_bytes;
unsigned int total_packets;
u16 irq_interval;
};
struct sxe_list {
struct sxe_ring *next;
u8 cnt;
};
struct sxe_tx_context {
struct sxe_list list;
struct sxe_ring *xdp_ring;
struct sxe_irq_rate irq_rate;
u16 work_limit;
};
struct sxe_rx_context {
struct sxe_irq_rate irq_rate;
struct sxe_list list;
};
struct sxe_irq_data {
struct sxe_adapter *adapter;
#ifdef SXE_TPH_CONFIGURE
s32 cpu;
#endif
u16 irq_idx;
u16 irq_interval;
struct sxe_tx_context tx;
struct sxe_rx_context rx;
struct napi_struct napi;
cpumask_t affinity_mask;
s32 numa_node;
struct rcu_head rcu;
s8 name[IFNAMSIZ + 16];
struct sxe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
struct sxe_irq_context {
struct msix_entry *msix_entries;
struct sxe_irq_data *irq_data[SXE_RING_IRQ_MAX_NUM];
/* serializes event (non-ring) interrupt mask updates */
spinlock_t event_irq_lock;
u16 max_irq_num;
u16 ring_irq_num;
u16 total_irq_num;
u16 rx_irq_interval;
u16 tx_irq_interval;
};
int sxe_irq_configure(struct sxe_adapter *adapter);
int sxe_poll(struct napi_struct *napi, int weight);
void sxe_napi_disable(struct sxe_adapter *adapter);
void sxe_irq_release(struct sxe_adapter *adapter);
void sxe_hw_irq_disable(struct sxe_adapter *adapter);
void sxe_irq_ctxt_exit(struct sxe_adapter *adapter);
s32 sxe_irq_ctxt_init(struct sxe_adapter *adapter);
void sxe_hw_irq_configure(struct sxe_adapter *adapter);
s32 sxe_config_space_irq_num_get(struct sxe_adapter *adapter);
s32 sxe_irq_coalesce_set(struct net_device *netdev,
struct ethtool_coalesce *user);
s32 sxe_irq_coalesce_get(struct net_device *netdev,
struct ethtool_coalesce *user);
bool sxe_is_irq_msi_mode(void);
bool sxe_is_irq_intx_mode(void);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,681 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_monitor.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/moduleparam.h>
#include "sxe_hw.h"
#include "sxe_monitor.h"
#include "sxe_ptp.h"
#include "sxe_dcb.h"
#include "sxe_netdev.h"
#include "sxe_tx_proc.h"
#include "sxe_rx_proc.h"
#include "sxe_sriov.h"
#include "sxe_errno.h"
extern struct workqueue_struct *sxe_fnav_workqueue;
#define SXE_LINK_CHECK_WAIT_TIME (4 * HZ)
#define SXE_SFP_RESET_WAIT_TIME (2 * HZ)
#define SXE_CHECK_LINK_TIMER_PERIOD (HZ / 10)
#define SXE_NORMAL_TIMER_PERIOD (HZ * 2)
#ifdef SXE_SFP_DEBUG
static unsigned int sw_sfp_multi_gb_ms = SXE_SW_SFP_MULTI_GB_MS;
#ifndef SXE_TEST
module_param(sw_sfp_multi_gb_ms, uint, 0);
MODULE_PARM_DESC(sw_sfp_multi_gb_ms,
"Mask LOS_N interrupt(SDP1) time after active rate switching - default is 4000");
#endif
#endif
void sxe_task_timer_trigger(struct sxe_adapter *adapter)
{
set_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state);
LOG_DEBUG_BDF("trigger link_check subtask, state=%lx,\n"
"\tmonitor_state=%lx, is_up=%d\n",
adapter->state, adapter->monitor_ctxt.state,
adapter->link.is_up);
adapter->link.check_timeout = jiffies;
mod_timer(&adapter->monitor_ctxt.timer, jiffies);
}
void sxe_sfp_reset_task_submit(struct sxe_adapter *adapter)
{
set_bit(SXE_SFP_NEED_RESET, &adapter->monitor_ctxt.state);
LOG_INFO("trigger sfp_reset subtask\n");
adapter->link.sfp_reset_timeout = 0;
adapter->link.last_lkcfg_time = 0;
adapter->link.sfp_multispeed_time = 0;
}
void sxe_monitor_work_schedule(struct sxe_adapter *adapter)
{
struct workqueue_struct *wq = sxe_workqueue_get();
if (!test_bit(SXE_DOWN, &adapter->state) &&
!test_bit(SXE_REMOVING, &adapter->state) &&
!test_and_set_bit(SXE_MONITOR_WORK_SCHED,
&adapter->monitor_ctxt.state)) {
queue_work(wq, &adapter->monitor_ctxt.work);
}
}
static void sxe_timer_cb(struct timer_list *timer)
{
struct sxe_monitor_context *monitor =
container_of(timer, struct sxe_monitor_context, timer);
struct sxe_adapter *adapter =
container_of(monitor, struct sxe_adapter, monitor_ctxt);
unsigned long period;
if (test_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state) ||
test_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state)) {
period = SXE_CHECK_LINK_TIMER_PERIOD;
} else {
period = SXE_NORMAL_TIMER_PERIOD;
}
mod_timer(&adapter->monitor_ctxt.timer, period + jiffies);
sxe_monitor_work_schedule(adapter);
}
static void sxe_monitor_work_complete(struct sxe_adapter *adapter)
{
BUG_ON(!test_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state));
/* in order to force CPU ordering */
smp_mb__before_atomic();
clear_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state);
}
static void sxe_reset_work(struct sxe_adapter *adapter)
{
if (!test_and_clear_bit(SXE_RESET_REQUESTED,
&adapter->monitor_ctxt.state))
goto l_end;
rtnl_lock();
if (test_bit(SXE_DOWN, &adapter->state) ||
test_bit(SXE_REMOVING, &adapter->state) ||
test_bit(SXE_RESETTING, &adapter->state))
goto l_unlock;
LOG_DEV_ERR("reset adapter\n");
adapter->stats.sw.reset_work_trigger_cnt++;
sxe_hw_reinit(adapter);
l_unlock:
rtnl_unlock();
l_end:
;
}
static void sxe_stats_update_work(struct sxe_adapter *adapter)
{
if (test_bit(SXE_DOWN, &adapter->state) ||
test_bit(SXE_REMOVING, &adapter->state) ||
test_bit(SXE_RESETTING, &adapter->state))
goto l_end;
stats_lock(adapter);
sxe_stats_update(adapter);
stats_unlock(adapter);
l_end:
;
}
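/* Periodic hang check: arm the per-ring Tx hang detectors, then trigger the
 * event interrupt (non-MSI-X) or every active ring vector (MSI-X) so queues
 * are polled again even if an interrupt was missed.
 */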
static void sxe_check_hang_work(struct sxe_adapter *adapter)
{
u32 i;
u64 eics = 0;
struct sxe_irq_data *irq_priv;
struct sxe_hw *hw = &adapter->hw;
struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring;
struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring;
if (test_bit(SXE_DOWN, &adapter->state) ||
test_bit(SXE_REMOVING, &adapter->state) ||
test_bit(SXE_RESETTING, &adapter->state))
goto l_end;
if (netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->tx_ring_ctxt.num; i++)
SXE_TX_HANG_CHECK_ACTIVE(tx_ring[i]);
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++)
SXE_TX_HANG_CHECK_ACTIVE(xdp_ring[i]);
}
if (!(adapter->cap & SXE_MSIX_ENABLED)) {
hw->irq.ops->event_irq_trigger(hw);
} else {
for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) {
irq_priv = adapter->irq_ctxt.irq_data[i];
if (irq_priv->tx.list.next || irq_priv->rx.list.next)
eics |= BIT_ULL(i);
}
hw->irq.ops->ring_irq_trigger(hw, eics);
}
l_end:
;
}
static void sxe_fc_configure(struct sxe_adapter *adapter)
{
bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable;
#ifdef SXE_DCB_CONFIGURE
if (adapter->dcb_ctxt.ieee_pfc)
pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en);
#endif
#ifdef SXE_DCB_CONFIGURE
if (!((adapter->cap & SXE_DCB_ENABLE) && pfc_en)) {
LOG_DEBUG_BDF("lfc configure\n");
sxe_fc_enable(adapter);
sxe_rx_drop_mode_set(adapter);
} else {
LOG_DEBUG_BDF("pfc configure\n");
sxe_dcb_pfc_configure(adapter);
}
#else
if (!((adapter->cap & SXE_DCB_ENABLE) && pfc_en)) {
LOG_DEBUG_BDF("lfc configure\n");
sxe_fc_enable(adapter);
sxe_rx_drop_mode_set(adapter);
}
#endif
}
static void sxe_vmac_configure(struct sxe_adapter *adapter)
{
sxe_fc_configure(adapter);
sxe_ptp_configure(adapter);
}
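/* Refresh link speed and state from hardware. The check request stays pending
 * until the link comes up or the 4s wait window expires, at which point the
 * LSC interrupt is re-enabled.
 */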
static void sxe_link_update(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
unsigned long flags;
if (!test_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state))
goto l_end;
sxe_link_info_get(adapter, &adapter->link.speed, &adapter->link.is_up);
LOG_DEBUG_BDF("link update, speed=%x, is_up=%d\n", adapter->link.speed,
adapter->link.is_up);
if (adapter->link.is_up)
sxe_vmac_configure(adapter);
if (adapter->link.is_up ||
time_after(jiffies, (adapter->link.check_timeout +
SXE_LINK_CHECK_WAIT_TIME))) {
clear_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state);
spin_lock_irqsave(&adapter->irq_ctxt.event_irq_lock, flags);
hw->irq.ops->specific_irq_enable(hw, SXE_EIMS_LSC);
spin_unlock_irqrestore(&adapter->irq_ctxt.event_irq_lock, flags);
LOG_DEBUG_BDF("clear link check requester, is_up=%d\n",
adapter->link.is_up);
}
l_end:
;
}
static void sxe_link_up_handle(struct sxe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
const char *speed_str;
if (netif_carrier_ok(netdev) &&
!test_bit(SXE_LINK_SPEED_CHANGE, &adapter->monitor_ctxt.state))
goto l_end;
clear_bit(SXE_LINK_SPEED_CHANGE, &adapter->monitor_ctxt.state);
switch (adapter->link.speed) {
case SXE_LINK_SPEED_10GB_FULL:
speed_str = "10 Gbps";
break;
case SXE_LINK_SPEED_1GB_FULL:
speed_str = "1 Gbps";
break;
case SXE_LINK_SPEED_100_FULL:
speed_str = "100 Mbps";
break;
case SXE_LINK_SPEED_10_FULL:
speed_str = "10 Mbps";
break;
default:
speed_str = "unknown speed";
break;
}
LOG_MSG_WARN(drv, "nic link is up, speed: %s\n", speed_str);
netif_carrier_on(netdev);
sxe_vf_rate_update(adapter);
netif_tx_wake_all_queues(adapter->netdev);
sxe_link_update_notify_vf_all(adapter);
l_end:
;
}
static void sxe_link_down_handle(struct sxe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
adapter->link.is_up = false;
adapter->link.speed = 0;
if (netif_carrier_ok(netdev)) {
LOG_MSG_WARN(drv, "nic link is down\n");
netif_carrier_off(netdev);
sxe_link_update_notify_vf_all(adapter);
}
if (sxe_tx_ring_pending(adapter) || sxe_vf_tx_pending(adapter)) {
LOG_MSG_WARN(drv,
"initiating reset to clear Tx work after link loss\n");
set_bit(SXE_RESET_REQUESTED, &adapter->monitor_ctxt.state);
}
}
static void sxe_detect_link_work(struct sxe_adapter *adapter)
{
carrier_lock(adapter);
if (test_bit(SXE_DOWN, &adapter->state) ||
test_bit(SXE_REMOVING, &adapter->state) ||
test_bit(SXE_RESETTING, &adapter->state)) {
carrier_unlock(adapter);
goto l_end;
}
sxe_link_update(adapter);
if (adapter->link.is_up)
sxe_link_up_handle(adapter);
else
sxe_link_down_handle(adapter);
carrier_unlock(adapter);
sxe_bad_vf_flr(adapter);
sxe_spoof_packets_check(adapter);
l_end:
;
}
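/* Delete every sampled flow-director rule: drain the fnav workqueue first so
 * no deferred insertions race with us, then walk the hash table under the
 * sample lock and reset the hardware sample statistics.
 */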
static s32 sxe_fnav_all_sample_rules_del(struct sxe_adapter *adapter)
{
struct sxe_fnav_sample_filter *filter;
struct sxe_hw *hw = &adapter->hw;
struct hlist_node *tmp;
int bkt;
if (!adapter->fnav_ctxt.sample_rules_cnt)
return 0;
flush_workqueue(sxe_fnav_workqueue);
spin_lock(&adapter->fnav_ctxt.sample_lock);
hash_for_each_safe(adapter->fnav_ctxt.sample_list, bkt, tmp, filter,
hlist) {
hw->dbu.ops->fnav_single_sample_rule_del(hw, filter->hash);
hash_del(&filter->hlist);
kfree(filter);
}
adapter->fnav_ctxt.sample_rules_cnt = 0;
spin_unlock(&adapter->fnav_ctxt.sample_lock);
hw->dbu.ops->fnav_sample_stats_reinit(hw);
return 0;
}
#ifdef NEED_BOOTTIME_SECONDS
static inline time64_t ktime_get_boottime_seconds(void)
{
return ktime_divns(ktime_get_boottime(), NSEC_PER_SEC);
}
#endif
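/* Sample-table overflow recovery: one second after the overflow was recorded,
 * flush all sampled rules, re-arm FNAV on the Tx/XDP rings and re-enable the
 * flow navigator interrupt; the overflow event itself only timestamps the
 * condition and marks the table as overflowed.
 */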
static void sxe_fnav_sample_reinit_work(struct sxe_adapter *adapter)
{
u32 i;
struct sxe_hw *hw = &adapter->hw;
unsigned long flags;
	if (adapter->fnav_ctxt.fdir_overflow_time &&
	    (ktime_get_boottime_seconds() -
	     adapter->fnav_ctxt.fdir_overflow_time > 1)) {
adapter->fnav_ctxt.fdir_overflow_time = 0;
adapter->stats.sw.fnav_overflow++;
if (sxe_fnav_all_sample_rules_del(adapter) == 0) {
for (i = 0; i < adapter->tx_ring_ctxt.num; i++) {
set_bit(SXE_TX_FNAV_INIT_DONE,
&adapter->tx_ring_ctxt.ring[i]->state);
}
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) {
set_bit(SXE_TX_FNAV_INIT_DONE,
&adapter->xdp_ring_ctxt.ring[i]->state);
}
hw->irq.ops->pending_irq_write_clear(hw, SXE_EICR_FLOW_NAV);
spin_lock_irqsave(&adapter->irq_ctxt.event_irq_lock, flags);
hw->irq.ops->specific_irq_enable(hw, SXE_EIMS_FLOW_NAV);
spin_unlock_irqrestore(&adapter->irq_ctxt.event_irq_lock, flags);
adapter->fnav_ctxt.is_sample_table_overflowed = false;
} else {
			LOG_MSG_ERR(probe,
				    "failed to finish FNAV re-initialization,\n"
				    "\tignoring FNAV APP_TR filter additions\n");
}
goto l_ret;
}
if (!test_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state)) {
LOG_INFO_BDF("fnav not requires reinit\n");
goto l_ret;
}
clear_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state);
if (test_bit(SXE_DOWN, &adapter->state)) {
LOG_INFO_BDF("sxe state is down no need fnav reinit\n");
goto l_ret;
}
if (!(adapter->cap & SXE_FNAV_SAMPLE_ENABLE)) {
LOG_INFO_BDF("only sample fnav mode need reinit\n");
goto l_ret;
}
adapter->fnav_ctxt.fdir_overflow_time = ktime_get_boottime_seconds();
adapter->fnav_ctxt.is_sample_table_overflowed = true;
l_ret:
LOG_INFO_BDF("fnav reinit finish, and overflow=%llu\n",
adapter->stats.sw.fnav_overflow);
}
static void sxe_ptp_timer_check(struct sxe_adapter *adapter)
{
if (test_bit(SXE_PTP_RUNNING, &adapter->state)) {
sxe_ptp_overflow_check(adapter);
if (adapter->cap & SXE_RX_HWTSTAMP_IN_REGISTER)
sxe_ptp_rx_hang(adapter);
sxe_ptp_tx_hang(adapter);
}
}
static s32 sxe_hw_fault_handle_task(struct sxe_adapter *adapter)
{
s32 ret = 0;
if (sxe_is_hw_fault(&adapter->hw)) {
if (!test_bit(SXE_DOWN, &adapter->state)) {
rtnl_lock();
sxe_down(adapter);
rtnl_unlock();
}
LOG_ERROR_BDF("sxe nic fault\n");
ret = -EFAULT;
}
return ret;
}
static void sxe_sfp_reset_work(struct sxe_adapter *adapter)
{
s32 ret;
struct sxe_monitor_context *monitor = &adapter->monitor_ctxt;
if (!test_bit(SXE_SFP_NEED_RESET, &monitor->state))
goto l_end;
if (adapter->link.sfp_reset_timeout &&
time_after(adapter->link.sfp_reset_timeout, jiffies))
goto l_end;
if (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state))
goto l_end;
adapter->link.sfp_reset_timeout = jiffies + SXE_SFP_RESET_WAIT_TIME - 1;
ret = adapter->phy_ctxt.ops->identify(adapter);
if (ret) {
LOG_WARN_BDF("monitor identify sfp failed\n");
goto sfp_out;
}
if (!test_bit(SXE_SFP_NEED_RESET, &monitor->state))
goto sfp_out;
clear_bit(SXE_SFP_NEED_RESET, &monitor->state);
set_bit(SXE_LINK_NEED_CONFIG, &monitor->state);
LOG_MSG_INFO(probe, "SFP+ reset done, trigger link_config subtask\n");
sfp_out:
clear_bit(SXE_IN_SFP_INIT, &adapter->state);
if (ret == SXE_ERR_SFF_NOT_SUPPORTED &&
adapter->netdev->reg_state == NETREG_REGISTERED) {
LOG_DEV_ERR("failed to initialize because an unsupported\n"
"\tSFP+ module type was detected.\n");
LOG_DEV_ERR("reload the driver after installing a\n"
"\tsupported module.\n");
unregister_netdev(adapter->netdev);
}
l_end:
;
}
static void sxe_sfp_link_config_work(struct sxe_adapter *adapter)
{
s32 ret;
u32 speed;
bool autoneg;
struct sxe_monitor_context *monitor = &adapter->monitor_ctxt;
if (time_after(jiffies, adapter->link.sfp_multispeed_time +
#ifdef SXE_SFP_DEBUG
(HZ * sw_sfp_multi_gb_ms) / SXE_HZ_TRANSTO_MS)) {
#else
(HZ * SXE_SW_SFP_MULTI_GB_MS) /
SXE_HZ_TRANSTO_MS)) {
#endif
clear_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state);
}
if (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state))
goto l_sfp_end;
if (!test_bit(SXE_LINK_NEED_CONFIG, &monitor->state))
goto l_sfp_uninit;
adapter->phy_ctxt.ops->get_link_capabilities(adapter, &speed, &autoneg);
ret = sxe_link_configure(adapter, speed);
if (ret) {
LOG_DEV_ERR("link config err, ret=%d, try...\n", ret);
goto l_sfp_uninit;
}
clear_bit(SXE_LINK_NEED_CONFIG, &monitor->state);
set_bit(SXE_LINK_CHECK_REQUESTED, &monitor->state);
LOG_DEBUG("link_config subtask done, trigger link_check subtask\n");
adapter->link.check_timeout = jiffies;
l_sfp_uninit:
clear_bit(SXE_IN_SFP_INIT, &adapter->state);
l_sfp_end:
;
}
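/* If the link partner is pausing us (XOFF seen while link-level flow control
 * is active), disarm the Tx hang detector so paused queues are not reported
 * as hung.
 */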
static void sxe_fc_tx_xoff_check(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
u32 i, xoff, dbu_to_mac_stats;
if (hw->fc.current_mode != SXE_FC_FULL &&
hw->fc.current_mode != SXE_FC_RX_PAUSE)
goto l_end;
xoff = hw->dbu.ops->tx_dbu_fc_status_get(hw);
dbu_to_mac_stats = hw->stat.ops->tx_dbu_to_mac_stats(hw);
xoff &= SXE_TFCS_PB0_MASK;
if (!xoff && !dbu_to_mac_stats)
goto l_end;
for (i = 0; i < adapter->tx_ring_ctxt.num; i++)
clear_bit(SXE_HANG_CHECK_ARMED, &adapter->tx_ring_ctxt.ring[i]->state);
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++)
clear_bit(SXE_HANG_CHECK_ARMED, &adapter->xdp_ring_ctxt.ring[i]->state);
l_end:
;
}
static void sxe_pfc_tx_xoff_check(struct sxe_adapter *adapter)
{
u8 tc;
struct sxe_hw *hw = &adapter->hw;
u32 i, data, xoff[SXE_PKG_BUF_NUM_MAX], dbu_to_mac_stats;
data = hw->dbu.ops->tx_dbu_fc_status_get(hw);
dbu_to_mac_stats = hw->stat.ops->tx_dbu_to_mac_stats(hw);
for (i = 0; i < SXE_PKG_BUF_NUM_MAX; i++) {
xoff[i] = SXE_TFCS_PB_MASK;
xoff[i] &= data & (SXE_TFCS_PB0_MASK << i);
}
for (i = 0; i < adapter->tx_ring_ctxt.num; i++) {
tc = adapter->tx_ring_ctxt.ring[i]->tc_idx;
if (!xoff[tc] && !dbu_to_mac_stats)
continue;
else
clear_bit(SXE_HANG_CHECK_ARMED, &adapter->tx_ring_ctxt.ring[i]->state);
}
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) {
tc = adapter->xdp_ring_ctxt.ring[i]->tc_idx;
if (!xoff[tc] && !dbu_to_mac_stats)
continue;
else
clear_bit(SXE_HANG_CHECK_ARMED, &adapter->xdp_ring_ctxt.ring[i]->state);
}
}
static void sxe_tx_xoff_check_work(struct sxe_adapter *adapter)
{
bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable;
#ifdef SXE_DCB_CONFIGURE
if (adapter->dcb_ctxt.ieee_pfc)
pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en);
#endif
if (!(adapter->cap & SXE_DCB_ENABLE) || !pfc_en)
sxe_fc_tx_xoff_check(adapter);
else
sxe_pfc_tx_xoff_check(adapter);
}
void sxe_work_cb(struct work_struct *work)
{
struct sxe_monitor_context *monitor =
container_of(work, struct sxe_monitor_context, work);
struct sxe_adapter *adapter =
container_of(monitor, struct sxe_adapter, monitor_ctxt);
if (sxe_hw_fault_handle_task(adapter))
goto l_end;
sxe_reset_work(adapter);
sxe_sfp_reset_work(adapter);
sxe_sfp_link_config_work(adapter);
sxe_detect_link_work(adapter);
sxe_stats_update_work(adapter);
sxe_tx_xoff_check_work(adapter);
sxe_fnav_sample_reinit_work(adapter);
sxe_check_hang_work(adapter);
sxe_ptp_timer_check(adapter);
l_end:
sxe_monitor_work_complete(adapter);
}
static void sxe_hw_fault_task_trigger(void *priv)
{
struct sxe_adapter *adapter = (struct sxe_adapter *)priv;
if (test_bit(SXE_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state)) {
sxe_monitor_work_schedule(adapter);
LOG_ERROR_BDF("sxe nic fault, submit monitor task and\n"
"\tperform the down operation\n");
}
}
void sxe_monitor_init(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
timer_setup(&adapter->monitor_ctxt.timer, sxe_timer_cb, 0);
INIT_WORK(&adapter->monitor_ctxt.work, sxe_work_cb);
set_bit(SXE_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state);
clear_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state);
sxe_hw_fault_handle_init(hw, sxe_hw_fault_task_trigger, adapter);
mutex_init(&adapter->link.carrier_mutex);
mutex_init(&adapter->stats.stats_mutex);
}

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_monitor.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_MONITOR_H__
#define __SXE_MONITOR_H__
#include <linux/types.h>
#include <linux/netdevice.h>
struct sxe_adapter;
enum sxe_monitor_task_state {
SXE_MONITOR_WORK_INITED,
SXE_MONITOR_WORK_SCHED,
SXE_RESET_REQUESTED,
SXE_LINK_CHECK_REQUESTED,
SXE_FNAV_REQUIRES_REINIT,
SXE_SFP_NEED_RESET,
SXE_LINK_NEED_CONFIG,
SXE_LINK_SPEED_CHANGE,
};
struct sxe_monitor_context {
struct timer_list timer;
struct work_struct work;
unsigned long state;
};
struct sxe_link_info {
bool is_up;
u32 speed;
/* in order to protect the data */
struct mutex carrier_mutex;
unsigned long check_timeout;
unsigned long sfp_reset_timeout;
unsigned long last_lkcfg_time;
unsigned long sfp_multispeed_time;
};
void sxe_monitor_init(struct sxe_adapter *adapter);
void sxe_monitor_work_schedule(struct sxe_adapter *adapter);
void sxe_task_timer_trigger(struct sxe_adapter *adapter);
void sxe_sfp_reset_task_submit(struct sxe_adapter *adapter);
void sxe_work_cb(struct work_struct *work);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_netdev.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_NETDEV_H__
#define __SXE_NETDEV_H__
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include "sxe.h"
#define SXE_GSO_PARTIAL_FEATURES \
(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
s32 sxe_link_config(struct sxe_adapter *adapter);
int sxe_open(struct net_device *netdev);
int sxe_close(struct net_device *netdev);
void sxe_set_rx_mode(struct net_device *netdev);
void __sxe_set_rx_mode(struct net_device *netdev, bool lock);
bool netif_is_sxe(struct net_device *dev);
void sxe_netdev_init(struct net_device *netdev, struct pci_dev *pdev);
void sxe_down(struct sxe_adapter *adapter);
void sxe_up(struct sxe_adapter *adapter);
void sxe_terminate(struct sxe_adapter *adapter);
void sxe_hw_reinit(struct sxe_adapter *adapter);
void sxe_reset(struct sxe_adapter *adapter);
void sxe_do_reset(struct net_device *netdev);
s32 sxe_ring_reassign(struct sxe_adapter *adapter, u8 tc);
s32 sxe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
#ifndef NO_NEED_POOL_DEFRAG
void sxe_macvlan_pools_defrag(struct net_device *dev);
#endif
void sxe_macvlan_configure(struct sxe_adapter *adapter);
u32 sxe_sw_mtu_get(struct sxe_adapter *adapter);
void sxe_stats_update(struct sxe_adapter *adapter);
u32 sxe_mbps_link_speed_get(u32 speed);
#endif

View File

@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_pci.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe.h"
#include "sxe_pci.h"
bool sxe_check_cfg_fault(struct sxe_hw *hw, struct pci_dev *dev)
{
u16 value;
struct sxe_adapter *adapter = hw->adapter;
pci_read_config_word(dev, PCI_VENDOR_ID, &value);
if (value == SXE_READ_CFG_WORD_FAILED) {
sxe_hw_fault_handle(hw);
LOG_ERROR_BDF("pci vendorId:0x%x read pci config word fail,\n"
"\tremove adapter.\n",
PCI_VENDOR_ID);
return true;
}
return false;
}
u16 sxe_read_pci_cfg_word(struct pci_dev *pdev, struct sxe_hw *hw, u32 reg)
{
u16 value = SXE_READ_CFG_WORD_FAILED;
if (sxe_is_hw_fault(hw))
goto l_end;
pci_read_config_word(pdev, reg, &value);
if (value == SXE_READ_CFG_WORD_FAILED)
sxe_check_cfg_fault(hw, pdev);
l_end:
return value;
}
#ifdef CONFIG_PCI_IOV
u32 sxe_read_pci_cfg_dword(struct sxe_adapter *adapter, u32 reg)
{
struct sxe_hw *hw = &adapter->hw;
u32 value = SXE_FAILED_READ_CFG_DWORD;
if (sxe_is_hw_fault(hw))
goto l_end;
pci_read_config_dword(adapter->pdev, reg, &value);
if (value == SXE_FAILED_READ_CFG_DWORD)
sxe_check_cfg_fault(hw, adapter->pdev);
l_end:
return value;
}
#endif
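/* Derive a poll count from the PCIe completion timeout range advertised in
 * Device Control 2, padded by 10%.
 */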
u32 sxe_pcie_timeout_poll(struct pci_dev *pdev, struct sxe_hw *hw)
{
u16 devctl2;
u32 pollcnt;
devctl2 = sxe_read_pci_cfg_word(pdev, hw, SXE_PCI_DEVICE_CONTROL2);
devctl2 &= SXE_PCIDEVCTRL2_TIMEO_MASK;
switch (devctl2) {
case SXE_PCIDEVCTRL2_65_130ms:
pollcnt = 1300;
break;
case SXE_PCIDEVCTRL2_260_520ms:
pollcnt = 5200;
break;
case SXE_PCIDEVCTRL2_1_2s:
pollcnt = 20000;
break;
case SXE_PCIDEVCTRL2_4_8s:
pollcnt = 80000;
break;
case SXE_PCIDEVCTRL2_17_34s:
pollcnt = 34000;
break;
case SXE_PCIDEVCTRL2_50_100us:
case SXE_PCIDEVCTRL2_1_2ms:
case SXE_PCIDEVCTRL2_16_32ms:
case SXE_PCIDEVCTRL2_16_32ms_def:
default:
pollcnt = 800;
break;
}
return (pollcnt * 11) / 10;
}
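/* Decode the PCIe completion timeout range from Device Control 2 into an
 * upper bound in microseconds; ranges beyond 2s are capped at 2s.
 */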
unsigned long sxe_get_completion_timeout(struct sxe_adapter *adapter)
{
u16 devctl2;
pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
switch (devctl2 & SXE_PCIDEVCTRL2_TIMEO_MASK) {
case SXE_PCIDEVCTRL2_17_34s:
case SXE_PCIDEVCTRL2_4_8s:
case SXE_PCIDEVCTRL2_1_2s:
return 2000000ul;
case SXE_PCIDEVCTRL2_260_520ms:
return 520000ul;
case SXE_PCIDEVCTRL2_65_130ms:
return 130000ul;
case SXE_PCIDEVCTRL2_16_32ms:
return 32000ul;
case SXE_PCIDEVCTRL2_1_2ms:
return 2000ul;
case SXE_PCIDEVCTRL2_50_100us:
return 100ul;
case SXE_PCIDEVCTRL2_16_32ms_def:
return 32000ul;
default:
break;
}
return 32000ul;
}

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_pci.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef _SXE_PCI_H_
#define _SXE_PCI_H_
#include "sxe.h"
#define PCI_VENDOR_ID_STARS 0x1FF2
#define SXE_DEV_ID_ASIC 0x10a1
#define SXE_DMA_BIT_WIDTH_64 64
#define SXE_DMA_BIT_WIDTH_32 32
#define SXE_READ_CFG_WORD_FAILED 0xFFFFU
#define SXE_FAILED_READ_CFG_DWORD 0xFFFFFFFFU
u16 sxe_read_pci_cfg_word(struct pci_dev *pdev, struct sxe_hw *hw, u32 reg);
bool sxe_check_cfg_fault(struct sxe_hw *hw, struct pci_dev *dev);
unsigned long sxe_get_completion_timeout(struct sxe_adapter *adapter);
u32 sxe_pcie_timeout_poll(struct pci_dev *pdev, struct sxe_hw *hw);
u32 sxe_read_pci_cfg_dword(struct sxe_adapter *adapter, u32 reg);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,197 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_phy.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_PHY_H__
#define __SXE_PHY_H__
#include <linux/types.h>
#include <linux/netdevice.h>
#include "sxe_host_cli.h"
#include "sxe_cli.h"
#ifdef SXE_PHY_CONFIGURE
#include <linux/phy.h>
#include <linux/mdio.h>
#endif
#define SXE_DEV_ID_FIBER 0
#define SXE_DEV_ID_COPPER 1
#define SXE_SFF_BASE_ADDR 0x0
#define SXE_SFF_IDENTIFIER 0x0
#define SXE_SFF_10GBE_COMP_CODES 0x3
#define SXE_SFF_1GBE_COMP_CODES 0x6
#define SXE_SFF_CABLE_TECHNOLOGY 0x8
#define SXE_SFF_VENDOR_PN 0x28
#define SXE_SFF_8472_DIAG_MONITOR_TYPE 0x5C
#define SXE_SFF_8472_COMPLIANCE 0x5E
#define SXE_SFF_IDENTIFIER_SFP 0x3
#define SXE_SFF_ADDRESSING_MODE 0x4
#define SXE_SFF_8472_UNSUP 0x0
#define SXE_SFF_DDM_IMPLEMENTED 0x40
#define SXE_SFF_DA_PASSIVE_CABLE 0x4
#define SXE_SFF_DA_ACTIVE_CABLE 0x8
#define SXE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define SXE_SFF_1GBASESX_CAPABLE 0x1
#define SXE_SFF_1GBASELX_CAPABLE 0x2
#define SXE_SFF_1GBASET_CAPABLE 0x8
#define SXE_SFF_10GBASESR_CAPABLE 0x10
#define SXE_SFF_10GBASELR_CAPABLE 0x20
#define SXE_SFP_COMP_CODE_SIZE 10
#define SXE_SFP_VENDOR_PN_SIZE 16
#define SXE_SFP_EEPROM_SIZE_MAX 512
#define SXE_SW_SFP_LOS_DELAY_MS 200
#define SXE_SW_SFP_MULTI_GB_MS 4000
#define SXE_PHY_ADDR_MAX 32
#define SXE_MARVELL_88X3310_PHY_ID 0x2002B
#define SXE_RATE_SEL_WAIT (40)
#define SXE_LINK_UP_RETRY_CNT (5)
#define SXE_LINK_UP_RETRY_ITR (100)
#define SXE_SFP_RESET_WAIT (100)
#define SXE_DEVAD_SHIFT (16)
#define SXE_MII_DEV_TYPE_SHIFT (16)
#define SXE_LINK_SPEED_MBPS_10G 10000
#define SXE_LINK_SPEED_MBPS_1G 1000
#define SXE_LINK_SPEED_MBPS_100 100
#define SXE_LINK_SPEED_MBPS_10 10
struct sxe_adapter;
enum sxe_media_type {
SXE_MEDIA_TYPE_UNKWON = 0,
SXE_MEDIA_TYPE_FIBER = 1,
SXE_MEDIA_TYPE_COPPER = 2,
};
enum sxe_phy_idx {
SXE_SFP_IDX = 0,
SXE_PHY_MARVELL_88X3310_idx,
SXE_PHY_MAX,
};
enum sxe_phy_type {
SXE_PHY_MARVELL_88X3310,
SXE_PHY_GENERIC,
SXE_PHY_CU_UNKNOWN,
SXE_PHY_UNKNOWN,
};
enum sxe_sfp_type {
SXE_SFP_TYPE_DA_CU = 0,
SXE_SFP_TYPE_SRLR = 1,
SXE_SFP_TYPE_1G_CU = 2,
SXE_SFP_TYPE_1G_SXLX = 4,
SXE_SFP_TYPE_NOT_PRESENT = 5,
SXE_SFP_TYPE_UNKNOWN = 0xFFFF,
};
struct sxe_phy_ops {
s32 (*identify)(struct sxe_adapter *adapter);
s32 (*link_configure)(struct sxe_adapter *adapter, u32 speed);
void (*get_link_capabilities)(struct sxe_adapter *adapter, u32 *speed,
bool *autoneg);
s32 (*reset)(struct sxe_adapter *adapter);
void (*sfp_tx_laser_disable)(struct sxe_adapter *adapter);
void (*sfp_tx_laser_enable)(struct sxe_adapter *adapter);
};
#ifdef SXE_PHY_CONFIGURE
struct sxe_phy_info {
u32 id;
bool autoneg;
struct mii_bus *mii_bus;
enum sxe_phy_type type;
struct mdio_if_info mdio;
};
#endif
struct sxe_sfp_info {
enum sxe_sfp_type type;
bool inserted;
bool multispeed_fiber;
};
struct sxe_phy_context {
bool is_sfp;
u32 speed;
u32 autoneg_advertised;
struct sxe_phy_ops *ops;
#ifdef SXE_PHY_CONFIGURE
struct sxe_phy_info phy_info;
#endif
struct sxe_sfp_info sfp_info;
};
s32 sxe_phy_init(struct sxe_adapter *adapter);
#ifdef SXE_PHY_CONFIGURE
int sxe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr);
int sxe_mdio_write(struct net_device *netdev, int prtad, int devad, u16 addr,
u16 value);
s32 sxe_phy_identify(struct sxe_adapter *adapter);
void sxe_phy_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
bool *autoneg);
s32 sxe_phy_link_speed_configure(struct sxe_adapter *adapter, u32 speed);
s32 sxe_mdiobus_init(struct sxe_adapter *adapter);
void sxe_mdiobus_exit(struct sxe_adapter *adapter);
s32 sxe_phy_reset(struct sxe_adapter *adapter);
#endif
enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter);
static inline bool sxe_is_sfp(struct sxe_adapter *adapter)
{
	return sxe_media_type_get(adapter) == SXE_MEDIA_TYPE_FIBER;
}
s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset, u16 len,
u8 *data);
s32 sxe_sfp_eeprom_write(struct sxe_adapter *adapter, u16 offset, u32 len,
u8 *data);
enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter);
void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter);
s32 sxe_sfp_vendor_pn_cmp(u8 *sfp_vendor_pn);
s32 sxe_sfp_aoc_vendor_pn_cmp(u8 *sfp_vendor_pn);
s32 sxe_sfp_identify(struct sxe_adapter *adapter);
s32 sxe_link_configure(struct sxe_adapter *adapter, u32 speed);
s32 sxe_sfp_reset(struct sxe_adapter *adapter);
void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed,
bool *link_up);
s32 sxe_pcs_sds_init(struct sxe_adapter *adapter, enum sxe_pcs_mode mode,
u32 max_frame);
void sxe_fc_enable(struct sxe_adapter *adapter);
#endif

View File

@ -0,0 +1,692 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ptp.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe.h"
#include "sxe_ptp.h"
#include "sxe_log.h"
#include "sxe_hw.h"
static u64 sxe_ptp_read(const struct cyclecounter *cc)
{
struct sxe_adapter *adapter =
container_of(cc, struct sxe_adapter, ptp_ctxt.hw_cc);
struct sxe_hw *hw = &adapter->hw;
return hw->dbu.ops->ptp_systime_get(hw);
}
#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE
static int sxe_ptp_adjfine(struct ptp_clock_info *ptp, long ppm)
{
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
struct sxe_hw *hw = &adapter->hw;
u32 adj_ns;
u32 neg_adj = 0;
if (ppm < 0) {
neg_adj = SXE_TIMADJ_SIGN;
adj_ns = (u32)(-((ppm * 125) >> 13));
} else {
adj_ns = (u32)((ppm * 125) >> 13);
}
LOG_DEBUG_BDF("ptp adjfreq adj_ns=%u, neg_adj=0x%x\n", adj_ns, neg_adj);
hw->dbu.ops->ptp_freq_adjust(hw, (neg_adj | adj_ns));
return 0;
}
#else
static int sxe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
struct sxe_hw *hw = &adapter->hw;
u32 adj_ns;
u32 neg_adj = 0;
if (ppb < 0) {
neg_adj = SXE_TIMADJ_SIGN;
adj_ns = -ppb;
} else {
adj_ns = ppb;
}
LOG_DEBUG_BDF("ptp adjfreq adj_ns=%u, neg_adj=0x%x\n", adj_ns, neg_adj);
hw->dbu.ops->ptp_freq_adjust(hw, (neg_adj | adj_ns));
return 0;
}
#endif
static int sxe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
unsigned long flags;
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
timecounter_adjtime(&adapter->ptp_ctxt.hw_tc, delta);
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
LOG_INFO_BDF("ptp adjust systim, delta: %lld, after adj: %llu\n", delta,
adapter->ptp_ctxt.hw_tc.nsec);
;
return 0;
}
static int sxe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
unsigned long flags;
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
struct sxe_hw *hw = &adapter->hw;
u64 ns, systim_ns;
systim_ns = hw->dbu.ops->ptp_systime_get(hw);
LOG_DEBUG_BDF("ptp get time = %llu ns\n", systim_ns);
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
ns = timecounter_cyc2time(&adapter->ptp_ctxt.hw_tc, systim_ns);
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
LOG_DEBUG_BDF("timecounter_cyc2time = %llu ns\n", ns);
*ts = ns_to_timespec64(ns);
return 0;
}
static int sxe_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
unsigned long flags;
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
u64 ns = timespec64_to_ns(ts);
LOG_DEBUG_BDF("ptp settime = %llu ns\n", ns);
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
timecounter_init(&adapter->ptp_ctxt.hw_tc, &adapter->ptp_ctxt.hw_cc, ns);
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
return 0;
}
static int sxe_ptp_feature_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
s32 ret = 0;
struct sxe_adapter *adapter =
container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info);
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_ctxt.ptp_setup_spp) {
ret = -EOPNOTSUPP;
goto l_ret;
}
if (on)
adapter->cap |= SXE_PTP_PPS_ENABLED;
else
adapter->cap &= ~SXE_PTP_PPS_ENABLED;
adapter->ptp_ctxt.ptp_setup_spp(adapter);
l_ret:
return ret;
}
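/* Fill in the PHC capabilities: frequency and offset adjustment plus get/set
 * time are supported; alarms, external timestamps and periodic outputs are
 * not.
 */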
static inline void
sxe_ptp_clock_info_init(struct ptp_clock_info *ptp_clock_info, char *name)
{
snprintf(ptp_clock_info->name, sizeof(ptp_clock_info->name), "%s",
name);
ptp_clock_info->owner = THIS_MODULE;
ptp_clock_info->max_adj = SXE_PTP_MAX_ADJ;
ptp_clock_info->n_alarm = 0;
ptp_clock_info->n_ext_ts = 0;
ptp_clock_info->n_per_out = 0;
ptp_clock_info->pps = 0;
#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE
ptp_clock_info->adjfine = sxe_ptp_adjfine;
#else
ptp_clock_info->adjfreq = sxe_ptp_adjfreq;
#endif
ptp_clock_info->adjtime = sxe_ptp_adjtime;
ptp_clock_info->gettime64 = sxe_ptp_gettime;
ptp_clock_info->settime64 = sxe_ptp_settime;
ptp_clock_info->enable = sxe_ptp_feature_enable;
}
static long sxe_ptp_clock_create(struct sxe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
long ret = 0;
if (!IS_ERR_OR_NULL(adapter->ptp_ctxt.ptp_clock))
goto l_ret;
sxe_ptp_clock_info_init(&adapter->ptp_ctxt.ptp_clock_info,
netdev->name);
LOG_DEBUG_BDF("init ptp[%s] info finish\n",
adapter->ptp_ctxt.ptp_clock_info.name);
adapter->ptp_ctxt.ptp_clock =
ptp_clock_register(&adapter->ptp_ctxt.ptp_clock_info,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_ctxt.ptp_clock)) {
ret = PTR_ERR(adapter->ptp_ctxt.ptp_clock);
adapter->ptp_ctxt.ptp_clock = NULL;
LOG_DEV_ERR("ptp_clock_register failed\n");
goto l_ret;
} else if (adapter->ptp_ctxt.ptp_clock) {
LOG_DEV_INFO("registered PHC device on %s\n", netdev->name);
}
adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
l_ret:
return ret;
}
static void sxe_ptp_clear_tx_timestamp(struct sxe_adapter *adapter)
{
if (adapter->ptp_ctxt.ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_ctxt.ptp_tx_skb);
adapter->ptp_ctxt.ptp_tx_skb = NULL;
}
clear_bit_unlock(SXE_PTP_TX_IN_PROGRESS, &adapter->state);
}
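/* Read the timecounter at least once every SXE_OVERFLOW_PERIOD so the
 * underlying cyclecounter never wraps unobserved.
 */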
void sxe_ptp_overflow_check(struct sxe_adapter *adapter)
{
unsigned long flags;
bool timeout =
time_is_before_jiffies(adapter->ptp_ctxt.last_overflow_check
+ SXE_OVERFLOW_PERIOD);
if (timeout) {
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
timecounter_read(&adapter->ptp_ctxt.hw_tc);
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
adapter->ptp_ctxt.last_overflow_check = jiffies;
}
}
void sxe_ptp_rx_hang(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
bool rx_tmstamp_valid;
struct sxe_ring *rx_ring;
unsigned long rx_event;
u16 n;
rx_tmstamp_valid = hw->dbu.ops->ptp_is_rx_timestamp_valid(hw);
if (!rx_tmstamp_valid) {
adapter->ptp_ctxt.last_rx_ptp_check = jiffies;
goto l_ret;
}
rx_event = adapter->ptp_ctxt.last_rx_ptp_check;
	for (n = 0; n < adapter->rx_ring_ctxt.num; n++) {
rx_ring = adapter->rx_ring_ctxt.ring[n];
if (time_after(rx_ring->last_rx_timestamp, rx_event))
rx_event = rx_ring->last_rx_timestamp;
}
if (time_is_before_jiffies(rx_event + SXE_PTP_RX_TIMEOUT)) {
hw->dbu.ops->ptp_rx_timestamp_clear(hw);
adapter->ptp_ctxt.last_rx_ptp_check = jiffies;
adapter->stats.sw.rx_hwtstamp_cleared++;
LOG_MSG_DEBUG(drv, "clearing RX Timestamp hang\n");
}
l_ret:
;
}
void sxe_ptp_tx_hang(struct sxe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->ptp_ctxt.ptp_tx_start +
SXE_PTP_TX_TIMEOUT);
if (!adapter->ptp_ctxt.ptp_tx_skb) {
LOG_INFO_BDF("no ptp skb to progress\n");
goto l_ret;
}
if (!test_bit(SXE_PTP_TX_IN_PROGRESS, &adapter->state)) {
LOG_INFO_BDF("tx ptp not in progress\n");
goto l_ret;
}
if (timeout) {
cancel_work_sync(&adapter->ptp_ctxt.ptp_tx_work);
sxe_ptp_clear_tx_timestamp(adapter);
adapter->stats.sw.tx_hwtstamp_timeouts++;
LOG_MSG_WARN(drv, "clearing Tx timestamp hang\n");
}
l_ret:
;
}
static void sxe_ptp_convert_to_hwtstamp(struct sxe_adapter *adapter,
struct skb_shared_hwtstamps *hwtstamp,
u64 timestamp)
{
unsigned long flags;
u64 ns;
memset(hwtstamp, 0, sizeof(*hwtstamp));
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
ns = timecounter_cyc2time(&adapter->ptp_ctxt.hw_tc, timestamp);
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
hwtstamp->hwtstamp = ns_to_ktime(ns);
}
static void sxe_ptp_tx_hwtstamp_process(struct sxe_adapter *adapter)
{
struct sk_buff *skb = adapter->ptp_ctxt.ptp_tx_skb;
struct skb_shared_hwtstamps shhwtstamps;
struct timespec64 ts;
u64 ns;
ts.tv_nsec = adapter->ptp_ctxt.tx_hwtstamp_nsec;
ts.tv_sec = adapter->ptp_ctxt.tx_hwtstamp_sec;
ns = (u64)timespec64_to_ns(&ts);
LOG_DEBUG_BDF("get tx timestamp value=%llu\n", ns);
sxe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, ns);
adapter->ptp_ctxt.ptp_tx_skb = NULL;
clear_bit_unlock(SXE_PTP_TX_IN_PROGRESS, &adapter->state);
#ifdef SXE_PTP_ONE_STEP
	if (!(adapter->cap & SXE_1588V2_ONE_STEP))
		skb_tstamp_tx(skb, &shhwtstamps);
	else
		adapter->cap &= ~SXE_1588V2_ONE_STEP;
#else
	skb_tstamp_tx(skb, &shhwtstamps);
#endif
dev_kfree_skb_any(skb);
}
void sxe_ptp_get_rx_tstamp_in_pkt(struct sxe_irq_data *irq_data,
struct sk_buff *skb)
{
__le64 ptp_tm;
struct sxe_adapter *adapter = irq_data->adapter;
skb_copy_bits(skb, skb->len - SXE_TS_HDR_LEN, &ptp_tm, SXE_TS_HDR_LEN);
__pskb_trim(skb, skb->len - SXE_TS_HDR_LEN);
LOG_DEBUG_BDF("ptp get timestamp in pkt end = %llu\n",
le64_to_cpu(ptp_tm));
sxe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(ptp_tm));
}
void sxe_ptp_get_rx_tstamp_in_reg(struct sxe_irq_data *irq_data,
struct sk_buff *skb)
{
	struct sxe_adapter *adapter;
	struct sxe_hw *hw;
	u64 ptp_tm;
	bool rx_tstamp_valid;
	if (!irq_data || !irq_data->adapter)
		goto l_ret;
	adapter = irq_data->adapter;
	hw = &adapter->hw;
rx_tstamp_valid = hw->dbu.ops->ptp_is_rx_timestamp_valid(hw);
if (rx_tstamp_valid) {
ptp_tm = hw->dbu.ops->ptp_rx_timestamp_get(hw);
sxe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), ptp_tm);
} else {
LOG_INFO_BDF("rx timestamp not valid in rx hw rigister\n");
goto l_ret;
}
l_ret:
;
}
static void sxe_ptp_tx_work_handler(struct work_struct *work)
{
struct sxe_adapter *adapter = container_of(work, struct sxe_adapter,
ptp_ctxt.ptp_tx_work);
struct sxe_hw *hw = &adapter->hw;
bool timeout = time_is_before_jiffies(adapter->ptp_ctxt.ptp_tx_start +
SXE_PTP_TX_TIMEOUT);
u32 ts_sec;
u32 ts_ns;
u32 last_sec;
u32 last_ns;
bool tx_tstamp_valid = true;
u8 i;
if (!adapter->ptp_ctxt.ptp_tx_skb) {
sxe_ptp_clear_tx_timestamp(adapter);
goto l_ret;
}
hw->dbu.ops->ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
if (ts_ns != adapter->ptp_ctxt.tx_hwtstamp_nsec ||
ts_sec != adapter->ptp_ctxt.tx_hwtstamp_sec) {
for (i = 0; i < SXE_TXTS_POLL_CHECK; i++)
hw->dbu.ops->ptp_tx_timestamp_get(hw, &last_sec, &last_ns);
for (; i < SXE_TXTS_POLL; i++) {
hw->dbu.ops->ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
if (last_ns != ts_ns || last_sec != ts_sec) {
tx_tstamp_valid = false;
break;
}
}
if (tx_tstamp_valid) {
adapter->ptp_ctxt.tx_hwtstamp_nsec = ts_ns;
adapter->ptp_ctxt.tx_hwtstamp_sec = ts_sec;
sxe_ptp_tx_hwtstamp_process(adapter);
return;
}
LOG_MSG_DEBUG(drv,
"Tx timestamp error,\n"
"\tts: %u %u, last ts: %u %u\n",
ts_sec, ts_ns, last_sec, last_ns);
}
if (timeout) {
sxe_ptp_clear_tx_timestamp(adapter);
adapter->stats.sw.tx_hwtstamp_timeouts++;
LOG_MSG_WARN(drv, "clearing Tx timestamp hang\n");
} else {
schedule_work(&adapter->ptp_ctxt.ptp_tx_work);
}
l_ret:
;
}
static s32 sxe_ptp_tx_type_get(s32 tx_type, u32 *tsctl)
{
s32 ret = 0;
switch (tx_type) {
case HWTSTAMP_TX_OFF:
*tsctl = SXE_TSCTRL_VER_2;
break;
case HWTSTAMP_TX_ON:
*tsctl |= SXE_TSCTRL_TSEN;
break;
default:
ret = -ERANGE;
}
return ret;
}
static s32 sxe_ptp_rx_filter_get(s32 *rx_filter, u32 *cap, bool *is_v1,
bool *is_l2, u32 *tses)
{
s32 ret = 0;
switch (*rx_filter) {
case HWTSTAMP_FILTER_NONE:
*cap &= ~(SXE_RX_HWTSTAMP_ENABLED |
SXE_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
*is_v1 = true;
*tses |= SXE_TSES_TXES_V1_SYNC | SXE_TSES_RXES_V1_SYNC;
*cap |= (SXE_RX_HWTSTAMP_ENABLED | SXE_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
*is_v1 = true;
*tses |=
SXE_TSES_TXES_V1_DELAY_REQ | SXE_TSES_RXES_V1_DELAY_REQ;
*cap |= (SXE_RX_HWTSTAMP_ENABLED | SXE_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
*is_l2 = true;
*tses |= SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL;
*rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
*cap |= (SXE_RX_HWTSTAMP_ENABLED | SXE_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
*is_v1 = true;
*tses |= SXE_TSES_TXES_V1_ALL | SXE_TSES_RXES_V1_ALL;
*rx_filter = HWTSTAMP_FILTER_ALL;
*cap |= SXE_RX_HWTSTAMP_ENABLED;
break;
#ifndef HAVE_NO_HWTSTAMP_FILTER_NTP_ALL
case HWTSTAMP_FILTER_NTP_ALL:
#endif
case HWTSTAMP_FILTER_ALL:
*tses |= SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL;
*rx_filter = HWTSTAMP_FILTER_ALL;
*cap |= SXE_RX_HWTSTAMP_ENABLED;
break;
default:
*cap &= ~(SXE_RX_HWTSTAMP_ENABLED | SXE_RX_HWTSTAMP_IN_REGISTER);
*rx_filter = HWTSTAMP_FILTER_NONE;
ret = -ERANGE;
}
return ret;
}
static int sxe_ptp_set_timestamp_mode(struct sxe_adapter *adapter,
struct hwtstamp_config *config)
{
struct sxe_hw *hw = &adapter->hw;
u32 tsctl = 0x0;
u32 tses = 0x0;
bool is_l2 = false;
bool is_v1 = false;
s32 ret;
if (config->flags) {
ret = -EINVAL;
goto l_ret;
}
LOG_DEBUG_BDF("ptp set timestamp mode: tx_type[0x%x], rx_filter[0x%x]\n",
config->tx_type, config->rx_filter);
ret = sxe_ptp_tx_type_get(config->tx_type, &tsctl);
if (ret) {
LOG_ERROR_BDF("ptp get tx type err ret = %d\n", ret);
goto l_ret;
}
ret = sxe_ptp_rx_filter_get(&config->rx_filter, &adapter->cap, &is_v1,
&is_l2, &tses);
if (ret) {
LOG_ERROR_BDF("ptp get rx filter err ret = %d\n", ret);
goto l_ret;
}
LOG_DEBUG_BDF("hw[%p] set hw timestamp: is_l2=%s, tsctl=0x%x,\n"
"\ttses=0x%x\n",
hw, is_l2 ? "true" : "false", tsctl, tses);
hw->dbu.ops->ptp_timestamp_mode_set(hw, is_l2, tsctl, tses);
hw->dbu.ops->ptp_timestamp_enable(hw);
sxe_ptp_clear_tx_timestamp(adapter);
hw->dbu.ops->ptp_rx_timestamp_clear(hw);
#ifdef SXE_PTP_ONE_STEP
adapter->cap &= ~SXE_1588V2_ONE_STEP;
#endif
l_ret:
return ret;
}
int sxe_ptp_hw_tstamp_config_set(struct sxe_adapter *adapter, struct ifreq *ifr)
{
struct hwtstamp_config config;
int ret;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) {
ret = -EFAULT;
goto l_ret;
}
ret = sxe_ptp_set_timestamp_mode(adapter, &config);
if (ret) {
LOG_ERROR_BDF("ptp set timestamp mode failed, err=%d\n", ret);
goto l_ret;
}
memcpy(&adapter->ptp_ctxt.tstamp_config, &config,
sizeof(adapter->ptp_ctxt.tstamp_config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
l_ret:
return ret;
}
int sxe_ptp_hw_tstamp_config_get(struct sxe_adapter *adapter, struct ifreq *ifr)
{
struct hwtstamp_config *config = &adapter->ptp_ctxt.tstamp_config;
return copy_to_user(ifr->ifr_data, config,
sizeof(*config)) ? -EFAULT : 0;
}
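/* The SYSTIME counter ticks in nanoseconds, so an identity cyclecounter
 * (mult = 1, shift = 0) over the full 64-bit mask is enough for the
 * timecounter layer.
 */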
static void sxe_ptp_cyclecounter_start(struct sxe_adapter *adapter)
{
struct cyclecounter cc;
unsigned long flags;
struct sxe_hw *hw = &adapter->hw;
cc.mask = CLOCKSOURCE_MASK(64);
cc.mult = 1;
cc.shift = 0;
cc.read = sxe_ptp_read;
hw->dbu.ops->ptp_systime_init(hw);
/* in order to force CPU ordering */
smp_mb();
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
memcpy(&adapter->ptp_ctxt.hw_cc, &cc, sizeof(adapter->ptp_ctxt.hw_cc));
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
}
static void sxe_ptp_hw_init(struct sxe_adapter *adapter)
{
struct sxe_hw *hw = &adapter->hw;
hw->dbu.ops->ptp_init(hw);
}
static inline void sxe_ptp_systime_init(struct sxe_adapter *adapter)
{
unsigned long flags;
spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags);
timecounter_init(&adapter->ptp_ctxt.hw_tc, &adapter->ptp_ctxt.hw_cc,
ktime_get_real_ns());
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags);
}
void sxe_ptp_reset(struct sxe_adapter *adapter)
{
sxe_ptp_hw_init(adapter);
sxe_ptp_set_timestamp_mode(adapter, &adapter->ptp_ctxt.tstamp_config);
sxe_ptp_cyclecounter_start(adapter);
sxe_ptp_systime_init(adapter);
adapter->ptp_ctxt.last_overflow_check = jiffies;
adapter->ptp_ctxt.tx_hwtstamp_nsec = 0;
adapter->ptp_ctxt.tx_hwtstamp_sec = 0;
}
void sxe_ptp_configure(struct sxe_adapter *adapter)
{
spin_lock_init(&adapter->ptp_ctxt.ptp_timer_lock);
if (sxe_ptp_clock_create(adapter)) {
LOG_DEBUG_BDF("create ptp err in addr:[%p]\n",
adapter->ptp_ctxt.ptp_clock);
goto l_end;
}
INIT_WORK(&adapter->ptp_ctxt.ptp_tx_work, sxe_ptp_tx_work_handler);
sxe_ptp_reset(adapter);
set_bit(SXE_PTP_RUNNING, &adapter->state);
l_end:
;
}
void sxe_ptp_suspend(struct sxe_adapter *adapter)
{
if (!test_and_clear_bit(SXE_PTP_RUNNING, &adapter->state))
goto l_ret;
adapter->cap &= ~SXE_PTP_PPS_ENABLED;
if (adapter->ptp_ctxt.ptp_setup_spp)
adapter->ptp_ctxt.ptp_setup_spp(adapter);
cancel_work_sync(&adapter->ptp_ctxt.ptp_tx_work);
sxe_ptp_clear_tx_timestamp(adapter);
l_ret:
;
}
void sxe_ptp_stop(struct sxe_adapter *adapter)
{
sxe_ptp_suspend(adapter);
if (adapter->ptp_ctxt.ptp_clock) {
ptp_clock_unregister(adapter->ptp_ctxt.ptp_clock);
adapter->ptp_ctxt.ptp_clock = NULL;
LOG_DEV_INFO("removed PHC on %s\n", adapter->netdev->name);
}
}

View File

@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ptp.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_PTP_H__
#define __SXE_PTP_H__
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
#include "sxe.h"
#define SXE_OVERFLOW_PERIOD (HZ * 30)
#define SXE_PTP_TX_TIMEOUT (HZ)
#define SXE_TS_HDR_LEN 8
#define SXE_PTP_RX_TIMEOUT (5 * HZ)
#define SXE_TIMADJ_SIGN 0x80000000
#define SXE_PTP_MSG_TYPE_SYNC 0x0000
#define SXE_PTP_MSG_TYPE_DELAY_REQ 0x0001
#define SXE_PTP_MSG_TYPE_MASK 0x000F
#define SXE_PTP_FLAGFIELD_OFFSET 0x0006
#define SXE_PTP_FLAGFIELD_TWOSTEP 0x0002
#define SXE_PTP_FLAGFIELD_UNICAST 0x0004
#define SXE_PTP_FLAGFIELD_MASK 0xFFFF
#define SXE_PTP_MAX_ADJ 125000000
void sxe_ptp_configure(struct sxe_adapter *adapter);
void sxe_ptp_suspend(struct sxe_adapter *adapter);
void sxe_ptp_stop(struct sxe_adapter *adapter);
void sxe_ptp_overflow_check(struct sxe_adapter *adapter);
void sxe_ptp_rx_hang(struct sxe_adapter *adapter);
void sxe_ptp_tx_hang(struct sxe_adapter *adapter);
void sxe_ptp_reset(struct sxe_adapter *adapter);
int sxe_ptp_hw_tstamp_config_set(struct sxe_adapter *adapter,
struct ifreq *ifr);
int sxe_ptp_hw_tstamp_config_get(struct sxe_adapter *adapter,
struct ifreq *ifr);
void sxe_ptp_get_rx_tstamp_in_pkt(struct sxe_irq_data *irq_data,
struct sk_buff *skb);
void sxe_ptp_get_rx_tstamp_in_reg(struct sxe_irq_data *irq_data,
struct sk_buff *skb);
static inline void sxe_ptp_rx_hwtstamp_process(struct sxe_ring *rx_ring,
union sxe_rx_data_desc *rx_desc,
struct sk_buff *skb)
{
LOG_DEBUG("process rx hwtsamp of ring[%u]\n", rx_ring->idx);
if (unlikely(sxe_status_err_check(rx_desc, SXE_RXD_STAT_TSIP))) {
sxe_ptp_get_rx_tstamp_in_pkt(rx_ring->irq_data, skb);
LOG_DEBUG("we got the time stamp in the end of packet\n");
goto l_ret;
}
if (unlikely(!sxe_status_err_check(rx_desc, SXE_RXDADV_STAT_TS))) {
LOG_DEBUG("the ptp time stamp is not ready in register\n");
goto l_ret;
}
sxe_ptp_get_rx_tstamp_in_reg(rx_ring->irq_data, skb);
LOG_DEBUG("we got the time stamp in the time register\n");
rx_ring->last_rx_timestamp = jiffies;
l_ret:
;
}
#endif

View File

@ -0,0 +1,539 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ring.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/kernel.h>
#include <linux/raid/pq.h>
#include "sxe.h"
#include "sxe_log.h"
u16 sxe_rss_num_get(struct sxe_adapter *adapter)
{
return (((adapter->cap & SXE_SRIOV_DCB_ENABLE) ==
SXE_SRIOV_DCB_ENABLE)) ?
SXE_NON_RSS_RING_NUM :
adapter->ring_f.rss;
}
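/* The RSS mask depends on the active mode: RSS is disabled under DCB+SR-IOV,
 * 16 or 8 queues under plain DCB (by TC count), 4 or 2 queues under SR-IOV
 * (by pool count), and 16 queues otherwise.
 */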
u16 sxe_rss_mask_get(struct sxe_adapter *adapter)
{
u16 mask;
u8 tcs = sxe_dcb_tc_get(adapter);
u16 num = adapter->pool_f.total_num;
switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) {
case (SXE_SRIOV_DCB_ENABLE):
mask = SXE_RSS_DISABLED_MASK;
break;
case SXE_DCB_ENABLE:
mask = (tcs <= SXE_DCB_4_TC) ? SXE_RSS_16Q_MASK :
SXE_RSS_8Q_MASK;
break;
case SXE_SRIOV_ENABLE:
mask = (num <= SXE_32_POOL) ? SXE_RSS_4Q_MASK : SXE_RSS_2Q_MASK;
break;
default:
mask = SXE_RSS_16Q_MASK;
break;
}
return mask;
}
u16 sxe_pool_mask_get(struct sxe_adapter *adapter)
{
u16 mask;
u8 tcs = sxe_dcb_tc_get(adapter);
u16 pool_total_num = adapter->pool_f.total_num;
switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) {
case (SXE_SRIOV_DCB_ENABLE):
mask = (tcs > 4) ? SXE_8Q_PER_POOL_MASK : SXE_4Q_PER_POOL_MASK;
break;
case SXE_SRIOV_ENABLE:
mask = (pool_total_num > SXE_32_POOL) ? SXE_2Q_PER_POOL_MASK :
SXE_4Q_PER_POOL_MASK;
break;
default:
mask = 0;
break;
}
return mask;
}
void sxe_ring_feature_init(struct sxe_adapter *adapter)
{
u16 rss, fnav;
rss = min_t(u16, SXE_RSS_RING_NUM_MAX, num_online_cpus());
adapter->ring_f.rss_limit = rss;
fnav = min_t(u16, SXE_FNAV_RING_NUM_MAX, num_online_cpus());
adapter->ring_f.fnav_limit = fnav;
adapter->pool_f.pf_num_used = 1;
adapter->pool_f.pf_num_limit = SXE_NUM_PF_POOL_DEFAULT;
adapter->tx_ring_ctxt.depth = SXE_DEFAULT_DESC_CNT;
adapter->rx_ring_ctxt.depth = SXE_DEFAULT_DESC_CNT;
adapter->xdp_ring_ctxt.depth = 0;
}
static void sxe_dcb_sriov_ring_set(struct sxe_adapter *adapter)
{
u32 i;
u8 tcs = sxe_dcb_tc_get(adapter);
u16 pf_pool_num_max = SXE_TXRX_RING_NUM_MAX / tcs;
struct sxe_pool_feature *pool_f = &adapter->pool_f;
struct sxe_ring_feature *ring_f = &adapter->ring_f;
pool_f->pf_num_used = min_t(u16, pool_f->pf_num_limit, pf_pool_num_max);
pool_f->total_num = pool_f->pf_num_used + pool_f->vf_num_used;
if (tcs > SXE_DCB_4_TC)
pool_f->total_num = min_t(u16, pool_f->total_num, SXE_16_POOL);
else
pool_f->total_num = min_t(u16, pool_f->total_num, SXE_32_POOL);
pool_f->pf_num_used = pool_f->total_num - pool_f->vf_num_used;
ring_f->tc_per_pool = tcs;
adapter->rx_ring_ctxt.num = pool_f->pf_num_used * ring_f->tc_per_pool;
adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num;
adapter->xdp_ring_ctxt.num = 0;
for (i = 0; i < tcs; i++)
netdev_set_tc_queue(adapter->netdev, i, SXE_NON_RSS_RING_NUM, i);
adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE;
LOG_INFO_BDF("tcs = %d, pf_num_used = %d,\n"
"\tpool_total_num=%d, tc_per_pool=%d, rx_num=%u,\n"
"\ttx_num=%u, adapter_cap = 0x%x\n",
tcs, pool_f->pf_num_used, pool_f->total_num,
ring_f->tc_per_pool, adapter->rx_ring_ctxt.num,
adapter->tx_ring_ctxt.num, adapter->cap);
}
static void sxe_dcb_ring_set(struct sxe_adapter *adapter)
{
u32 i;
u16 ring_per_tc;
u8 tcs = sxe_dcb_tc_get(adapter);
struct net_device *dev = adapter->netdev;
struct sxe_ring_feature *ring_f = &adapter->ring_f;
ring_per_tc = dev->num_tx_queues / tcs;
if (tcs > SXE_DCB_4_TC)
ring_per_tc = min_t(u16, ring_per_tc, SXE_8_RING_PER_TC);
else
ring_per_tc = min_t(u16, ring_per_tc, SXE_16_RING_PER_TC);
ring_per_tc = min_t(s32, ring_per_tc, ring_f->rss_limit);
ring_f->ring_per_tc = ring_per_tc;
adapter->rx_ring_ctxt.num = ring_per_tc * tcs;
adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num;
adapter->xdp_ring_ctxt.num = 0;
for (i = 0; i < tcs; i++)
netdev_set_tc_queue(dev, i, ring_per_tc, ring_per_tc * i);
adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE;
LOG_INFO_BDF("tcs = %d, ring_per_tc=%d,\n"
"\trx_num=%u, tx_num=%u, adapter_cap = 0x%x\n",
tcs, ring_f->ring_per_tc, adapter->rx_ring_ctxt.num,
adapter->tx_ring_ctxt.num, adapter->cap);
}
static void sxe_sriov_ring_set(struct sxe_adapter *adapter)
{
u16 num_pool, ring_per_pool;
struct sxe_pool_feature *pool_f = &adapter->pool_f;
struct sxe_ring_feature *ring_f = &adapter->ring_f;
u16 pf_num_used = pool_f->pf_num_limit;
u16 max_ring_per_pool = SXE_TXRX_RING_NUM_MAX / pf_num_used;
ring_per_pool = min_t(u16, ring_f->rss_limit, max_ring_per_pool);
num_pool = pf_num_used + pool_f->vf_num_used;
num_pool = min_t(u16, SXE_POOLS_NUM_MAX, num_pool);
pf_num_used = num_pool - pool_f->vf_num_used;
if (num_pool > SXE_32_POOL) {
ring_per_pool = min_t(u16, ring_per_pool, SXE_2_RING_PER_POOL);
} else {
ring_per_pool = (ring_per_pool > SXE_3_RING_PER_POOL) ?
SXE_4_RING_PER_POOL :
(ring_per_pool > SXE_1_RING_PER_POOL) ?
SXE_2_RING_PER_POOL :
SXE_1_RING_PER_POOL;
}
ring_f->ring_per_pool = ring_per_pool;
pool_f->total_num = num_pool;
pool_f->pf_num_used = pf_num_used;
adapter->rx_ring_ctxt.num = pf_num_used * ring_per_pool;
adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num;
adapter->xdp_ring_ctxt.num = 0;
#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT
if (pf_num_used > SXE_NUM_PF_POOL_DEFAULT)
netdev_set_num_tc(adapter->netdev, SXE_DCB_1_TC);
#endif
netdev_set_tc_queue(adapter->netdev, 0, ring_f->ring_per_pool, 0);
adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE;
LOG_INFO_BDF("pf_num_used = %d, pool_total_num=%d,\n"
"\tring_per_pool=%d, rx_num=%u, tx_num=%u, rss_limit=%u,\n"
"\tadapter_cap = 0x%x\n",
pool_f->pf_num_used, pool_f->total_num,
ring_f->ring_per_pool, adapter->rx_ring_ctxt.num,
adapter->tx_ring_ctxt.num, ring_f->rss_limit,
adapter->cap);
}
static u16 sxe_xdp_queues_num_get(struct sxe_adapter *adapter)
{
u16 queues = min_t(u16, SXE_XDP_RING_NUM_MAX, nr_cpu_ids);
return adapter->xdp_prog ? queues : 0;
}
static void sxe_rss_ring_set(struct sxe_adapter *adapter)
{
u16 rss;
struct sxe_ring_feature *ring_f = &adapter->ring_f;
ring_f->rss = ring_f->rss_limit;
rss = ring_f->rss;
adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE;
if (rss > SXE_NON_RSS_RING_NUM)
adapter->cap |= SXE_RSS_ENABLE;
if ((adapter->cap & SXE_RSS_ENABLE) && adapter->fnav_ctxt.sample_rate) {
ring_f->fnav_num = ring_f->fnav_limit;
rss = ring_f->fnav_num;
if (!(adapter->cap & SXE_FNAV_SPECIFIC_ENABLE))
adapter->cap |= SXE_FNAV_SAMPLE_ENABLE;
}
adapter->rx_ring_ctxt.num = rss;
adapter->tx_ring_ctxt.num = rss;
adapter->xdp_ring_ctxt.num = sxe_xdp_queues_num_get(adapter);
LOG_INFO_BDF("rss=%u, rss_limit=%u, fnav_limit=%u\n"
"\trx_num=%u, tx_num=%u, xdp_num=%u cap=0x%x\n",
ring_f->rss, ring_f->rss_limit, ring_f->fnav_limit,
adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num,
adapter->xdp_ring_ctxt.num, adapter->cap);
}
void sxe_ring_num_set(struct sxe_adapter *adapter)
{
adapter->rx_ring_ctxt.num = SXE_NON_RSS_RING_NUM;
adapter->tx_ring_ctxt.num = SXE_NON_RSS_RING_NUM;
adapter->xdp_ring_ctxt.num = 0;
adapter->pool_f.pf_num_used = 1;
adapter->ring_f.ring_per_pool = 1;
switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) {
case (SXE_SRIOV_DCB_ENABLE):
sxe_dcb_sriov_ring_set(adapter);
break;
case SXE_DCB_ENABLE:
sxe_dcb_ring_set(adapter);
break;
case SXE_SRIOV_ENABLE:
sxe_sriov_ring_set(adapter);
break;
default:
sxe_rss_ring_set(adapter);
break;
}
LOG_INFO_BDF("set ring num, cap = 0x%x\n", adapter->cap);
}
static void sxe_dcb_sriov_ring_reg_map(struct sxe_adapter *adapter)
{
u32 i;
u16 reg_idx;
u8 tcs = sxe_dcb_tc_get(adapter);
u16 pool = 0;
u16 pool_mask = sxe_pool_mask_get(adapter);
struct sxe_pool_feature *pool_f = &adapter->pool_f;
reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask);
for (i = 0; i < adapter->rx_ring_ctxt.num; i++, reg_idx++) {
if ((reg_idx & ~pool_mask) >= tcs) {
pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask);
}
adapter->rx_ring_ctxt.ring[i]->reg_idx = reg_idx;
adapter->rx_ring_ctxt.ring[i]->netdev =
pool ? NULL : adapter->netdev;
}
reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask);
for (i = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++) {
if ((reg_idx & ~pool_mask) >= tcs)
reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask);
adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx;
}
LOG_INFO_BDF("dcb sriov ring to reg mapping\n");
}
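/* Compute the first Tx/Rx register index for a traffic class. With more than
 * 4 TCs, Rx rings are packed 16 per TC and Tx rings 32/16/8 depending on the
 * TC; with up to 4 TCs, Rx uses 32 per TC and Tx 64/16.
 */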
static void sxe_first_reg_idx_get(u8 tcs, u8 tc_idx, u32 *tx_idx, u32 *rx_idx)
{
if (tcs > SXE_DCB_4_TC) {
*rx_idx = tc_idx << SXE_8TC_RX_RING_SHIFT_4;
if (tc_idx < SXE_TC_IDX3) {
*tx_idx = tc_idx << SXE_TC2_TX_RING_SHIFT_5;
} else if (tc_idx < SXE_TC_IDX5) {
*tx_idx = (tc_idx + SXE_TX_RING_OFFSET_2)
<< SXE_TC4_TX_RING_SHIFT_4;
} else {
*tx_idx = (tc_idx + SXE_TX_RING_OFFSET_8)
<< SXE_TC5_TX_RING_SHIFT_3;
}
} else {
*rx_idx = tc_idx << SXE_4TC_RX_RING_SHIFT_5;
if (tc_idx < SXE_TC_IDX2)
*tx_idx = tc_idx << SXE_TC1_TX_RING_SHIFT_6;
else
*tx_idx = (tc_idx + SXE_TX_RING_OFFSET_4)
<< SXE_TC4_TX_RING_SHIFT_4;
}
}
static void sxe_dcb_ring_reg_map(struct sxe_adapter *adapter)
{
u32 i, offset;
u16 ring_per_tc;
u32 tx_idx = 0;
u32 rx_idx = 0;
u32 tc_idx = 0;
u8 tcs = sxe_dcb_tc_get(adapter);
ring_per_tc = adapter->ring_f.ring_per_tc;
for (offset = 0; tc_idx < tcs; tc_idx++, offset += ring_per_tc) {
sxe_first_reg_idx_get(tcs, tc_idx, &tx_idx, &rx_idx);
for (i = 0; i < ring_per_tc; i++, tx_idx++, rx_idx++) {
adapter->tx_ring_ctxt.ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring_ctxt.ring[offset + i]->reg_idx = rx_idx;
adapter->rx_ring_ctxt.ring[offset + i]->netdev =
adapter->netdev;
adapter->tx_ring_ctxt.ring[offset + i]->tc_idx = tc_idx;
adapter->rx_ring_ctxt.ring[offset + i]->tc_idx = tc_idx;
}
}
LOG_INFO_BDF("dcb ring to reg mapping\n");
}
static void sxe_sriov_ring_reg_map(struct sxe_adapter *adapter)
{
u32 i;
u16 reg_idx;
u16 pool = 0;
u16 pool_mask = sxe_pool_mask_get(adapter);
u16 rss_mask = sxe_rss_mask_get(adapter);
struct sxe_pool_feature *pool_f = &adapter->pool_f;
struct sxe_ring_feature *ring_f = &adapter->ring_f;
reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask);
for (i = 0; i < adapter->rx_ring_ctxt.num; i++, reg_idx++) {
if ((reg_idx & ~pool_mask) >= ring_f->ring_per_pool) {
pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask);
}
adapter->rx_ring_ctxt.ring[i]->reg_idx = reg_idx;
adapter->rx_ring_ctxt.ring[i]->netdev =
pool ? NULL : adapter->netdev;
LOG_INFO_BDF("rx ring idx[%u] map to reg idx[%d]\n", i,
reg_idx);
}
reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask);
for (i = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++) {
if ((reg_idx & rss_mask) >= ring_f->ring_per_pool)
reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask);
adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx;
LOG_INFO_BDF("tx ring idx[%u] map to reg idx[%d]\n", i, reg_idx);
}
LOG_INFO_BDF("sriov ring to reg mapping\n");
}
static void sxe_rss_ring_reg_map(struct sxe_adapter *adapter)
{
u32 i, reg_idx;
for (i = 0; i < adapter->rx_ring_ctxt.num; i++) {
adapter->rx_ring_ctxt.ring[i]->reg_idx = i;
adapter->rx_ring_ctxt.ring[i]->netdev = adapter->netdev;
}
for (i = 0, reg_idx = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++)
adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++, reg_idx++)
adapter->xdp_ring_ctxt.ring[i]->reg_idx = reg_idx;
LOG_INFO_BDF("rss ring to reg mapping\n");
}
void sxe_ring_reg_map(struct sxe_adapter *adapter)
{
adapter->rx_ring_ctxt.ring[0]->reg_idx = 0;
adapter->tx_ring_ctxt.ring[0]->reg_idx = 0;
switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) {
case (SXE_SRIOV_DCB_ENABLE):
sxe_dcb_sriov_ring_reg_map(adapter);
break;
case SXE_DCB_ENABLE:
sxe_dcb_ring_reg_map(adapter);
break;
case SXE_SRIOV_ENABLE:
sxe_sriov_ring_reg_map(adapter);
break;
default:
sxe_rss_ring_reg_map(adapter);
break;
}
LOG_INFO_BDF("ring to reg mapping, cap = %x\n", adapter->cap);
}
static void sxe_add_ring(struct sxe_ring *ring, struct sxe_list *head)
{
ring->next = head->next;
head->next = ring;
head->cnt++;
}
void sxe_tx_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx)
{
struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx];
struct sxe_ring *ring = &irq_data->ring[base];
u16 txr_idx = ring_idx;
LOG_INFO_BDF("irq_idx:%u tx_ring_cnt:%u base:%u ring_idx:%u.\n",
irq_idx, cnt, base, ring_idx);
while (cnt) {
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->irq_data = irq_data;
sxe_add_ring(ring, &irq_data->tx.list);
irq_data->tx.irq_rate.next_update = jiffies + 1;
ring->depth = adapter->tx_ring_ctxt.depth;
ring->idx = txr_idx;
WRITE_ONCE(adapter->tx_ring_ctxt.ring[txr_idx], ring);
cnt--;
txr_idx += adapter->irq_ctxt.ring_irq_num;
ring++;
}
}
void sxe_xdp_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx)
{
struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx];
struct sxe_ring *ring = &irq_data->ring[base];
u16 xdp_idx = ring_idx;
LOG_INFO_BDF("irq_idx:%u xdp_ring_cnt:%u base:%u ring_idx:%u.\n",
irq_idx, cnt, base, ring_idx);
while (cnt) {
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->irq_data = irq_data;
irq_data->tx.xdp_ring = ring;
ring->depth = adapter->tx_ring_ctxt.depth;
ring->idx = xdp_idx;
set_ring_xdp(ring);
spin_lock_init(&ring->tx_lock);
WRITE_ONCE(adapter->xdp_ring_ctxt.ring[xdp_idx], ring);
cnt--;
xdp_idx++;
ring++;
}
}
void sxe_rx_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx)
{
struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx];
struct sxe_ring *ring = &irq_data->ring[base];
u16 rxr_idx = ring_idx;
LOG_INFO_BDF("irq_idx:%u rx_ring_cnt:%u base:%u ring_idx:%u.\n",
irq_idx, cnt, base, ring_idx);
while (cnt) {
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->irq_data = irq_data;
sxe_add_ring(ring, &irq_data->rx.list);
irq_data->rx.irq_rate.next_update = jiffies + 1;
ring->depth = adapter->rx_ring_ctxt.depth;
ring->idx = rxr_idx;
WRITE_ONCE(adapter->rx_ring_ctxt.ring[rxr_idx], ring);
cnt--;
rxr_idx += adapter->irq_ctxt.ring_irq_num;
ring++;
}
}
void sxe_ring_stats_init(struct sxe_adapter *adapter)
{
u32 i;
for (i = 0; i < adapter->rx_ring_ctxt.num; i++)
u64_stats_init(&adapter->rx_ring_ctxt.ring[i]->syncp);
for (i = 0; i < adapter->tx_ring_ctxt.num; i++)
u64_stats_init(&adapter->tx_ring_ctxt.ring[i]->syncp);
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++)
u64_stats_init(&adapter->xdp_ring_ctxt.ring[i]->syncp);
}

View File

@ -0,0 +1,465 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ring.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_RING_H__
#define __SXE_RING_H__
#include "sxe_compat.h"
#include <linux/skbuff.h>
#ifndef HAVE_NO_XDP_BUFF_RXQ
#include <net/xdp.h>
#endif
struct sxe_adapter;
struct sxe_irq_data;
#define SXE_NON_RSS_RING_NUM 1
#define MIN_QUEUES_IN_SRIOV 4
#define SXE_RSS_RING_NUM_MAX 16
#define SXE_TXRX_RING_NUM_MAX 64
#define SXE_XDP_RING_NUM_MAX SXE_TXRX_RING_NUM_MAX
#define SXE_FNAV_RING_NUM_MAX (SXE_TXRX_RING_NUM_MAX - 1)
#define SXE_NUM_PF_POOL_DEFAULT 1
#define SXE_PAGE_SIZE_4KB 4096
#define SXE_PAGE_SIZE_8KB 8192
#define SXE_PAGE_SIZE_64KB 65536
#define SXE_IP_HEAD_LEN_UNIT 4
#define SXE_TC_IDX2 2
#define SXE_TC_IDX3 3
#define SXE_TC_IDX5 5
#define SXE_DCB_TC_MAX 8
#define SXE_8_RING_PER_TC 8
#define SXE_16_RING_PER_TC 16
#define SXE_TX_RING_OFFSET_2 2
#define SXE_TX_RING_OFFSET_4 4
#define SXE_TX_RING_OFFSET_8 8
#define SXE_TC1_TX_RING_SHIFT_6 6
#define SXE_TC2_TX_RING_SHIFT_5 5
#define SXE_TC4_TX_RING_SHIFT_4 4
#define SXE_TC5_TX_RING_SHIFT_3 3
#define SXE_8TC_RX_RING_SHIFT_4 4
#define SXE_4TC_RX_RING_SHIFT_5 5
#define SXE_MAX_QOS_IDX 7
#define SXE_DESC_CNT_MAX 4096
#define SXE_DESC_CNT_MIN 64
#define SXE_DEFAULT_DESC_CNT 512
#define SXE_REQ_DESCRIPTOR_MULTIPLE 8
#define SXE_TX_WORK_LIMIT 256
#define SXE_RSS_16Q_MASK 0xF
#define SXE_RSS_8Q_MASK 0x7
#define SXE_RSS_4Q_MASK 0x3
#define SXE_RSS_2Q_MASK 0x1
#define SXE_RSS_DISABLED_MASK 0x0
#define SXE_RXD_STAT_DD 0x01
#define SXE_RXD_STAT_EOP 0x02
#define SXE_RXD_STAT_FLM 0x04
#define SXE_RXD_STAT_VP 0x08
#define SXE_RXDADV_NEXTP_MASK 0x000FFFF0
#define SXE_RXDADV_NEXTP_SHIFT 0x00000004
#define SXE_RXD_STAT_UDPCS 0x10
#define SXE_RXD_STAT_L4CS 0x20
#define SXE_RXD_STAT_IPCS 0x40
#define SXE_RXD_STAT_PIF 0x80
#define SXE_RXD_STAT_CRCV 0x100
#define SXE_RXD_STAT_OUTERIPCS 0x100
#define SXE_RXD_STAT_VEXT 0x200
#define SXE_RXD_STAT_UDPV 0x400
#define SXE_RXD_STAT_DYNINT 0x800
#define SXE_RXD_STAT_LLINT 0x800
#define SXE_RXD_STAT_TSIP 0x08000
#define SXE_RXD_STAT_TS 0x10000
#define SXE_RXD_STAT_SECP 0x20000
#define SXE_RXD_STAT_LB 0x40000
#define SXE_RXD_STAT_ACK 0x8000
#define SXE_RXD_ERR_CE 0x01
#define SXE_RXD_ERR_LE 0x02
#define SXE_RXD_ERR_PE 0x08
#define SXE_RXD_ERR_OSE 0x10
#define SXE_RXD_ERR_USE 0x20
#define SXE_RXD_ERR_TCPE 0x40
#define SXE_RXD_ERR_IPE 0x80
#define SXE_RXDADV_ERR_MASK 0xfff00000
#define SXE_RXDADV_ERR_SHIFT 20
#define SXE_RXDADV_ERR_OUTERIPER 0x04000000
#define SXE_RXDADV_ERR_FCEOFE 0x80000000
#define SXE_RXDADV_ERR_FCERR 0x00700000
#define SXE_RXDADV_ERR_FNAV_LEN 0x00100000
#define SXE_RXDADV_ERR_FNAV_DROP 0x00200000
#define SXE_RXDADV_ERR_FNAV_COLL 0x00400000
#define SXE_RXDADV_ERR_HBO 0x00800000
#define SXE_RXDADV_ERR_CE 0x01000000
#define SXE_RXDADV_ERR_LE 0x02000000
#define SXE_RXDADV_ERR_PE 0x08000000
#define SXE_RXDADV_ERR_OSE 0x10000000
#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000
#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000
#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
#define SXE_RXDADV_ERR_USE 0x20000000
#define SXE_RXDADV_ERR_L4E 0x40000000
#define SXE_RXDADV_ERR_IPE 0x80000000
#define SXE_RXD_VLAN_ID_MASK 0x0FFF
#define SXE_RXD_PRI_MASK 0xE000
#define SXE_RXD_PRI_SHIFT 13
#define SXE_RXD_CFI_MASK 0x1000
#define SXE_RXD_CFI_SHIFT 12
#define SXE_RXDADV_LROCNT_MASK 0x001E0000
#define SXE_RXDADV_LROCNT_SHIFT 17
#define SXE_MAX_VLAN_IDX 4095
#define SXE_RXDADV_STAT_DD SXE_RXD_STAT_DD
#define SXE_RXDADV_STAT_EOP SXE_RXD_STAT_EOP
#define SXE_RXDADV_STAT_FLM SXE_RXD_STAT_FLM
#define SXE_RXDADV_STAT_VP SXE_RXD_STAT_VP
#define SXE_RXDADV_STAT_MASK 0x000fffff
#define SXE_RXDADV_STAT_TS 0x00010000
#define SXE_RXDADV_STAT_SECP 0x00020000
#define SXE_RXDADV_RSSTYPE_NONE 0x00000000
#define SXE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
#define SXE_RXDADV_RSSTYPE_IPV4 0x00000002
#define SXE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
#define SXE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
#define SXE_RXDADV_RSSTYPE_IPV6 0x00000005
#define SXE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
#define SXE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
#define SXE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
#define SXE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
#define SXE_RSS_L4_TYPES_MASK \
((1ul << SXE_RXDADV_RSSTYPE_IPV4_TCP) | \
(1ul << SXE_RXDADV_RSSTYPE_IPV4_UDP) | \
(1ul << SXE_RXDADV_RSSTYPE_IPV6_TCP) | \
(1ul << SXE_RXDADV_RSSTYPE_IPV6_UDP))
#define SXE_RXDADV_PKTTYPE_NONE 0x00000000
#define SXE_RXDADV_PKTTYPE_IPV4 0x00000010
#define SXE_RXDADV_PKTTYPE_IPV4_EX 0x00000020
#define SXE_RXDADV_PKTTYPE_IPV6 0x00000040
#define SXE_RXDADV_PKTTYPE_IPV6_EX 0x00000080
#define SXE_RXDADV_PKTTYPE_TCP 0x00000100
#define SXE_RXDADV_PKTTYPE_UDP 0x00000200
#define SXE_RXDADV_PKTTYPE_SCTP 0x00000400
#define SXE_RXDADV_PKTTYPE_NFS 0x00000800
#define SXE_RXDADV_PKTTYPE_VXLAN 0x00000800
#define SXE_RXDADV_PKTTYPE_TUNNEL 0x00010000
#define SXE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000
#define SXE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000
#define SXE_RXDADV_PKTTYPE_LINKSEC 0x00004000
#define SXE_RXDADV_PKTTYPE_ETQF 0x00008000
#define SXE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070
#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4
#define SXE_HW_RING_IN_POOL(POOL_MASK) (__ALIGN_MASK(1, ~(POOL_MASK)))
#define PF_POOL_INDEX(p) ((p) + adapter->pool_f.vf_num_used)
#define SXE_RX_DESC(R, i) \
(&(((union sxe_rx_data_desc *)((R)->desc.base_addr))[i]))
#define ring_is_lro_enabled(ring) test_bit(SXE_RX_LRO_ENABLED, &(ring)->state)
#define set_ring_lro_enabled(ring) set_bit(SXE_RX_LRO_ENABLED, &(ring)->state)
#define clear_ring_lro_enabled(ring) \
clear_bit(SXE_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) test_bit(SXE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) set_bit(SXE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) clear_bit(SXE_TX_XDP_RING, &(ring)->state)
#define sxe_for_each_ring(head) \
for (ring = (head).next; ring; ring = ring->next)
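/* Note: sxe_for_each_ring() expects a local variable named 'ring'
 * of type struct sxe_ring * to be in scope at the call site.
 */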
#define SXE_TX_DESC(R, i) \
(&(((union sxe_tx_data_desc *)((R)->desc.base_addr))[i]))
#define SXE_TX_CTXTDESC(R, i) \
(&(((struct sxe_tx_context_desc *)((R)->desc.base_addr))[i]))
#define SXE_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define SXE_TX_WAKE_THRESHOLD (SXE_TX_DESC_NEEDED * 2)
#define SXE_TX_NON_DATA_DESC_NUM 3
#define SXE_DATA_PER_DESC_SIZE_SHIFT 14
#define SXE_DATA_PER_DESC_SIZE_MAX BIT(SXE_DATA_PER_DESC_SIZE_SHIFT)
#define SXE_TX_DESC_USE_COUNT(S) DIV_ROUND_UP((S), SXE_DATA_PER_DESC_SIZE_MAX)
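/* Example: a 32KB buffer takes DIV_ROUND_UP(32768, 16384) = 2 data
 * descriptors, while SXE_TX_DESC_NEEDED reserves MAX_SKB_FRAGS + 4
 * slots to also cover the context descriptor and worst-case frags.
 */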
#define SXE_TX_DESC_PREFETCH_THRESH_1 1
#define SXE_TX_DESC_PREFETCH_THRESH_8 8
#define SXE_TX_DESC_HOST_THRESH 1
#define SXE_TX_DESC_WRITEBACK_THRESH 32
#define SXE_MAX_TXRX_DESC_POLL 10
enum sxe_ring_state {
SXE_RX_3K_BUFFER,
SXE_RX_BUILD_SKB_ENABLED,
SXE_RX_LRO_ENABLED,
SXE_TX_FNAV_INIT_DONE,
SXE_TX_XPS_INIT_DONE,
SXE_TX_DETECT_HANG,
SXE_HANG_CHECK_ARMED,
SXE_TX_XDP_RING,
SXE_TX_DISABLED,
};
#define SXE_TX_HANG_CHECK_ACTIVE(ring) \
set_bit(SXE_TX_DETECT_HANG, &(ring)->state)
#define SXE_TX_HANG_CHECK_COMPLETE(ring) \
clear_bit(SXE_TX_DETECT_HANG, &(ring)->state)
#define SXE_DETECT_TX_HANG_NEED(ring) \
test_bit(SXE_TX_DETECT_HANG, &(ring)->state)
struct sxe_ring_stats {
u64 packets;
u64 bytes;
};
struct sxe_tx_ring_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
};
struct sxe_rx_ring_stats {
u64 lro_count;
u64 lro_flush;
u64 non_eop_descs;
u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
};
struct sxe_ring_desc {
void *base_addr;
u8 __iomem *tail;
dma_addr_t dma;
};
struct sxe_ring {
struct sxe_ring *next;
struct sxe_irq_data *irq_data;
struct net_device *netdev;
struct device *dev;
u8 idx;
u8 reg_idx;
u8 tc_idx;
u16 ring_idx;
unsigned long state;
u16 next_to_use;
u16 next_to_clean;
u16 depth;
u32 size;
struct sxe_ring_desc desc;
union {
struct sxe_tx_buffer *tx_buffer_info;
struct sxe_rx_buffer *rx_buffer_info;
};
union {
u16 next_to_alloc;
struct {
u8 fnav_sample_rate;
u8 fnav_sample_count;
};
};
unsigned long last_rx_timestamp;
u16 rx_offset;
struct bpf_prog *xdp_prog;
#ifndef HAVE_NO_XDP_BUFF_RXQ
struct xdp_rxq_info xdp_rxq;
#endif
/* in order to protect the data */
spinlock_t tx_lock;
#ifdef HAVE_AF_XDP_ZERO_COPY
#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL
struct zero_copy_allocator zca;
#endif
#ifndef HAVE_NETDEV_BPF_XSK_BUFF_POOL
struct xdp_umem *xsk_pool;
#else
struct xsk_buff_pool *xsk_pool;
#endif
u16 rx_buf_len;
#endif
struct sxe_ring_stats stats;
struct u64_stats_sync syncp;
union {
struct sxe_tx_ring_stats tx_stats;
struct sxe_rx_ring_stats rx_stats;
};
} ____cacheline_internodealigned_in_smp;
struct sxe_ring_context {
u16 num;
u16 depth;
struct sxe_ring
*ring[SXE_TXRX_RING_NUM_MAX] ____cacheline_aligned_in_smp;
};
struct sxe_ring_feature {
u16 rss_limit;
union {
u16 tc_per_pool;
u16 ring_per_pool;
u16 ring_per_tc;
u16 rss;
};
u16 fnav_limit;
u16 fnav_num;
} ____cacheline_internodealigned_in_smp;
struct sxe_pool_feature {
u16 total_num;
u16 pf_num_limit;
u16 pf_num_used;
u16 vf_num_used;
} ____cacheline_internodealigned_in_smp;
union sxe_tx_data_desc {
struct {
__le64 buffer_addr;
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd;
__le32 nxtseq_seed;
__le32 status;
} wb;
};
struct sxe_tx_context_desc {
__le32 vlan_macip_lens;
__le32 sa_idx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
union sxe_rx_data_desc {
struct {
__le64 pkt_addr;
__le64 hdr_addr;
} read;
struct {
struct {
union {
__le32 data;
struct {
__le16 pkt_info;
__le16 hdr_info;
} hs_rss;
} lo_dword;
union {
__le32 rss;
struct {
__le16 ip_id;
__le16 csum;
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error;
__le16 length;
__le16 vlan;
} upper;
} wb;
};
struct sxe_tx_buffer {
union sxe_tx_data_desc *next_to_watch;
unsigned long time_stamp;
union {
struct sk_buff *skb;
#ifdef HAVE_XDP_SUPPORT
struct xdp_frame *xdpf;
#endif
};
u32 bytecount;
u16 gso_segs;
__be16 protocol;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_features;
};
struct sxe_rx_buffer {
#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL
struct sk_buff *skb;
dma_addr_t dma;
#endif
union {
struct {
#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL
struct sk_buff *skb;
dma_addr_t dma;
#endif
struct page *page;
u32 page_offset;
u16 pagecnt_bias;
};
#ifdef HAVE_AF_XDP_ZERO_COPY
struct {
#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL
bool discard;
struct xdp_buff *xdp;
#else
void *addr;
u64 handle;
#endif
};
#endif
};
};
u16 sxe_rss_num_get(struct sxe_adapter *adapter);
u16 sxe_rss_mask_get(struct sxe_adapter *adapter);
u16 sxe_pool_mask_get(struct sxe_adapter *adapter);
void sxe_ring_num_set(struct sxe_adapter *adapter);
void sxe_ring_reg_map(struct sxe_adapter *adapter);
void sxe_ring_feature_init(struct sxe_adapter *adapter);
void sxe_ring_stats_init(struct sxe_adapter *adapter);
static inline __le32 sxe_status_err_check(union sxe_rx_data_desc *rx_desc,
const u32 stat_err_bits)
{
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
void sxe_tx_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx);
void sxe_xdp_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx);
void sxe_rx_ring_init(struct sxe_adapter *adapter, u16 base, u16 cnt,
u16 ring_idx, u16 irq_idx);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,236 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_rx_proc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_RX_H__
#define __SXE_RX_H__
#include "sxe.h"
#define ALIGN_4K (4096)
#define SXE_RX_BUFFER_WRITE (16)
#define SXE_RXBUFFER_256 (256)
#define SXE_RXBUFFER_1536 (1536)
#define SXE_RXBUFFER_2K (2048)
#define SXE_RXBUFFER_3K (3072)
#define SXE_RXBUFFER_4K (4096)
#define SXE_MAX_RXBUFFER (16384)
#define SXE_RX_HDR_SIZE SXE_RXBUFFER_256
#define SXE_MIN_LRO_ITR (24)
#define SXE_RXDADV_RSSTYPE_MASK (0x0000000F)
#define SXE_ETH_FRAMING (20)
#define ETH_RSS_IPV4 BIT_ULL(2)
#define ETH_RSS_NONFRAG_IPV4_TCP BIT_ULL(4)
#define ETH_RSS_NONFRAG_IPV4_UDP BIT_ULL(5)
#define ETH_RSS_IPV6 BIT_ULL(8)
#define ETH_RSS_NONFRAG_IPV6_TCP BIT_ULL(10)
#define ETH_RSS_NONFRAG_IPV6_UDP BIT_ULL(11)
#define SXE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
#define SXE_B2BT(BT) ((BT) * 8)
#define SXE_PFC_D 672
#define SXE_CABLE_DC 5556
#define SXE_CABLE_DO 5000
#define SXE_PHY_D 12800
#define SXE_MAC_D 4096
#define SXE_XAUI_D (2 * 1024)
#define SXE_ID (SXE_MAC_D + SXE_XAUI_D + SXE_PHY_D)
#define SXE_HD 6144
#define SXE_PCI_DELAY 10000
#define SXE_DV(_max_frame_link, _max_frame_tc) \
((36 * \
(SXE_B2BT(_max_frame_link) + SXE_PFC_D + \
(2 * SXE_CABLE_DC) + (2 * SXE_ID) + SXE_HD) / \
25 + \
1) + \
2 * SXE_B2BT(_max_frame_tc))
#define SXE_LOW_DV(_max_frame_tc) \
(2 * (2 * SXE_B2BT(_max_frame_tc) + (36 * SXE_PCI_DELAY / 25) + 1))
struct sxe_skb_ctrl_buffer {
union {
struct sk_buff *head;
struct sk_buff *tail;
};
dma_addr_t dma;
u16 lro_cnt;
bool page_released;
};
struct sxe_rss_hash_config {
u8 *rss_key;
u8 rss_key_len;
u64 rss_hf;
};
#define SXE_CTRL_BUFFER(skb) ((struct sxe_skb_ctrl_buffer *)(skb)->cb)
#ifdef HAVE_DMA_ATTRS_STRUCT
#define SXE_RX_DMA_ATTR NULL
#else
#define SXE_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#endif
#define ring_uses_build_skb(ring) \
test_bit(SXE_RX_BUILD_SKB_ENABLED, &(ring)->state)
#if (PAGE_SIZE < 8192)
#define SXE_MAX_2K_FRAME_BUILD_SKB (SXE_RXBUFFER_1536 - NET_IP_ALIGN)
#define SXE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + SXE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(SXE_RXBUFFER_2K))
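/* Headroom left when rx_buf_len is rounded up to half a page and
 * the skb_shared_info overhead is subtracted; used as skb padding.
 */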
static inline u32 sxe_compute_pad(u32 rx_buf_len)
{
u32 page_size, pad_size;
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
return pad_size;
}
static inline u32 sxe_skb_pad(void)
{
u32 rx_buf_len;
if (SXE_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = SXE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
else
rx_buf_len = SXE_RXBUFFER_1536;
rx_buf_len -= NET_IP_ALIGN;
return sxe_compute_pad(rx_buf_len);
}
#define SXE_SKB_PAD sxe_skb_pad()
#else
#define SXE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
static inline u16 sxe_rx_pg_order(struct sxe_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (test_bit(SXE_RX_3K_BUFFER, &ring->state))
return 1;
#endif
return 0;
}
#define sxe_rx_pg_size(_ring) (PAGE_SIZE << sxe_rx_pg_order(_ring))
s32 sxe_rss_hash_conf_get(struct sxe_adapter *adapter,
struct sxe_rss_hash_config *rss_conf);
s32 sxe_rx_configure(struct sxe_adapter *adapter);
void sxe_rx_ring_free(struct sxe_ring *ring);
void sxe_rx_resources_free(struct sxe_adapter *adapter);
s32 sxe_rx_ring_depth_reset(struct sxe_adapter *adapter, u32 rx_cnt);
void sxe_rx_ring_buffer_clean(struct sxe_ring *ring);
u32 sxe_rx_ring_irq_clean(struct sxe_irq_data *q_vector,
struct sxe_ring *rx_ring, const u32 budget);
void sxe_hw_rx_disable(struct sxe_adapter *adapter);
void sxe_hw_rx_configure(struct sxe_adapter *adapter);
void sxe_skb_fields_process(struct sxe_ring *rx_ring,
union sxe_rx_data_desc *rx_desc,
struct sk_buff *skb);
void sxe_rx_skb_deliver(struct sxe_irq_data *irq_data, struct sk_buff *skb);
void sxe_rx_ring_attr_configure(struct sxe_adapter *adapter,
struct sxe_ring *ring);
static inline void sxe_rx_ring_buffer_init(struct sxe_ring *rx_ring)
{
memset(rx_ring->rx_buffer_info, 0,
sizeof(struct sxe_rx_buffer) * rx_ring->depth);
}
static inline void sxe_rx_pkt_stats_update(struct sxe_ring *rx_ring,
struct sxe_irq_rate *irq_rate,
struct sxe_ring_stats *stats)
{
LOG_DEBUG("in the irq, process total packets[%llu], bytes[%llu]\n",
stats->packets, stats->bytes);
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += stats->packets;
rx_ring->stats.bytes += stats->bytes;
u64_stats_update_end(&rx_ring->syncp);
irq_rate->total_packets += stats->packets;
irq_rate->total_bytes += stats->bytes;
}
void sxe_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 cleaned_count);
s32 sxe_test_rx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring);
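/* Free slots in the ring; one descriptor is always kept unused so
 * that next_to_use == next_to_clean unambiguously means "empty"
 * rather than "full".
 */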
static inline u16 sxe_desc_unused(struct sxe_ring *ring)
{
u16 ntc = ring->next_to_clean;
u16 ntu = ring->next_to_use;
return ((ntc > ntu) ? 0 : ring->depth) + ntc - ntu - 1;
}
static inline u32 sxe_rx_bufsz(struct sxe_ring *ring)
{
u32 bufsz;
if (test_bit(SXE_RX_3K_BUFFER, &ring->state)) {
bufsz = SXE_RXBUFFER_3K;
goto l_ret;
}
#if (PAGE_SIZE < 8192)
if (ring_uses_build_skb(ring)) {
bufsz = SXE_MAX_2K_FRAME_BUILD_SKB;
goto l_ret;
}
#endif
bufsz = SXE_RXBUFFER_2K;
l_ret:
return bufsz;
}
static inline void sxe_rx_release(struct sxe_adapter *adapter)
{
sxe_rx_resources_free(adapter);
}
static inline u32 sxe_rss_redir_tbl_size_get(void)
{
return SXE_MAX_RETA_ENTRIES;
}
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,241 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_sriov.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_SRIOV_H__
#define __SXE_SRIOV_H__
#include "sxe.h"
#define SXE_VF_FUNCTION_MAX 64
#define SXE_VF_DRV_MAX (SXE_VF_FUNCTION_MAX - 1)
#define SXE_MAX_VFS_1TC SXE_VF_FUNCTION_MAX
#define SXE_MAX_VFS_4TC 32
#define SXE_MAX_VFS_8TC 16
#define SXE_MSG_NUM(size) DIV_ROUND_UP(size, 4)
#define SXE_MSGTYPE_ACK 0x80000000
#define SXE_MSGTYPE_NACK 0x40000000
#define SXE_VFREQ_RESET 0x01
#define SXE_VFREQ_MAC_ADDR_SET 0x02
#define SXE_VFREQ_MC_ADDR_SYNC 0x03
#define SXE_VFREQ_VLAN_SET 0x04
#define SXE_VFREQ_LPE_SET 0x05
#define SXE_VFREQ_UC_ADDR_SYNC 0x06
#define SXE_VFREQ_API_NEGOTIATE 0x08
#define SXE_VFREQ_RING_INFO_GET 0x09
#define SXE_VFREQ_REDIR_TBL_GET 0x0a
#define SXE_VFREQ_RSS_KEY_GET 0x0b
#define SXE_VFREQ_CAST_MODE_SET 0x0c
#define SXE_VFREQ_LINK_ENABLE_GET 0x0d
#define SXE_VFREQ_IPSEC_ADD 0x0e
#define SXE_VFREQ_IPSEC_DEL 0x0f
#define SXE_VFREQ_RSS_CONF_GET 0x10
#define SXE_VFREQ_MASK 0xFF
#define SXE_CTRL_MSG_LINK_UPDATE 0x100
#define SXE_CTRL_MSG_NETDEV_DOWN 0x200
#define SXE_CTRL_MSG_REINIT 0x400
#define SXE_PF_CTRL_MSG_MASK 0x700
#define SXE_PFREQ_MASK 0xFF00
#define SXE_VF_MC_ADDR_NUM_SHIFT 16
#define SXE_VFREQ_MSGINFO_SHIFT 16
#define SXE_VFREQ_MSGINFO_MASK (0xFF << SXE_VFREQ_MSGINFO_SHIFT)
#define SXE_RETA_ENTRIES_DWORDS (SXE_MAX_RETA_ENTRIES / 16)
#define SXE_VF_DISABLE_WAIT 100
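/* Mailbox words carry the request type in the low byte
 * (SXE_VFREQ_MASK); per-message data rides in bits 16-23
 * (SXE_VFREQ_MSGINFO_MASK) and PF-initiated notifications use the
 * SXE_PF_CTRL_MSG_MASK bits.
 */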
enum sxe_mbx_api_version {
SXE_MBX_API_10 = 0,
SXE_MBX_API_11,
SXE_MBX_API_12,
SXE_MBX_API_13,
SXE_MBX_API_14,
SXE_MBX_API_NR,
};
enum sxe_cast_mode {
SXE_CAST_MODE_NONE = 0,
SXE_CAST_MODE_MULTI,
SXE_CAST_MODE_ALLMULTI,
SXE_CAST_MODE_PROMISC,
};
struct sxe_msg_table {
u32 msg_type;
s32 (*msg_func)(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx);
};
struct sxe_mbx_api_msg {
u32 msg_type;
u32 api_version;
};
struct sxe_uc_addr_msg {
u32 msg_type;
u8 uc_addr[ETH_ALEN];
u16 pad;
};
struct sxe_rst_rcv {
u32 msg_type;
};
struct sxe_rst_reply {
u32 msg_type;
u32 mac_addr[2];
u32 mc_filter_type;
u32 sw_mtu;
};
struct sxe_rst_msg {
union {
struct sxe_rst_rcv rcv;
struct sxe_rst_reply reply;
};
};
struct sxe_ring_info_msg {
u32 msg_type;
u8 max_rx_num;
u8 max_tx_num;
u8 tc_num;
u8 default_tc;
};
struct sxe_mc_sync_msg {
u16 msg_type;
u16 mc_cnt;
u16 mc_addr_extract[SXE_VF_MC_ENTRY_NUM_MAX];
};
struct sxe_uc_sync_msg {
u16 msg_type;
u16 index;
u32 addr[2];
};
struct sxe_cast_mode_msg {
u32 msg_type;
u32 cast_mode;
};
struct sxe_redir_tbl_msg {
u32 type;
u32 entries[SXE_RETA_ENTRIES_DWORDS];
};
struct sxe_rss_hsah_key_msg {
u32 type;
u8 hash_key[SXE_RSS_KEY_SIZE];
};
struct sxe_rss_hash_msg {
u32 type;
u8 hash_key[SXE_RSS_KEY_SIZE];
u64 rss_hf;
};
struct sxe_ipsec_add_msg {
u32 msg_type;
u32 pf_sa_idx;
__be32 spi;
u8 flags;
u8 proto;
u16 family;
__be32 ip_addr[4];
u32 key[5];
};
struct sxe_ipsec_del_msg {
u32 msg_type;
u32 pf_sa_idx;
};
struct sxe_link_enable_msg {
u32 msg_type;
bool link_enable;
};
s32 sxe_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
#ifdef HAVE_NDO_SET_VF_LINK_STATE
s32 sxe_set_vf_link_state(struct net_device *netdev, s32 vf_idx, s32 state);
void sxe_vf_enable_and_reinit_notify_vf_all(struct sxe_adapter *adapter);
#endif
void sxe_sriov_init(struct sxe_adapter *adapter);
void sxe_vf_exit(struct sxe_adapter *adapter);
s32 sxe_sriov_configure(struct pci_dev *pdev, int num_vfs);
void sxe_vt1_configure(struct sxe_adapter *adapter);
void sxe_mailbox_irq_handle(struct sxe_adapter *adapter);
s32 sxe_set_vf_mac(struct net_device *dev, s32 vf_idx, u8 *mac_addr);
s32 sxe_set_vf_spoofchk(struct net_device *dev, s32 vf_idx, bool status);
s32 sxe_set_vf_trust(struct net_device *dev, s32 vf_idx, bool status);
int sxe_set_vf_rss_query_en(struct net_device *dev, s32 vf_idx, bool status);
s32 sxe_get_vf_config(struct net_device *dev, s32 vf_idx,
struct ifla_vf_info *info);
s32 sxe_set_vf_rate(struct net_device *netdev, s32 vf_idx, s32 min_rate,
s32 max_rate);
s32 sxe_vf_req_task_handle(struct sxe_adapter *adapter, u8 vf_idx);
void sxe_vf_ack_task_handle(struct sxe_adapter *adapter, u8 vf_idx);
void sxe_vf_hw_rst(struct sxe_adapter *adapter, u8 vf_idx);
void sxe_vf_down(struct sxe_adapter *adapter);
void sxe_bad_vf_flr(struct sxe_adapter *adapter);
void sxe_spoof_packets_check(struct sxe_adapter *adapter);
bool sxe_vf_tx_pending(struct sxe_adapter *adapter);
void sxe_vf_rate_update(struct sxe_adapter *adapter);
void sxe_link_update_notify_vf_all(struct sxe_adapter *adapter);
void sxe_netdev_down_notify_vf_all(struct sxe_adapter *adapter);
void sxe_vf_trust_update_notify(struct sxe_adapter *adapter, u16 index);
void sxe_param_sriov_enable(struct sxe_adapter *adapter, u8 user_num_vfs);
void sxe_vf_resource_release(struct sxe_adapter *adapter);
void sxe_vf_disable(struct sxe_adapter *adapter);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,160 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_tx_proc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_TX_PROC_H__
#define __SXE_TX_PROC_H__
#include "sxe.h"
#include "sxe_ring.h"
#define SXE_IPV4 (4)
#define SXE_IPV6 (6)
#define SXE_ALIGN_4K (4096)
#define SXE_TX_FEATURE_VLAN_PRIO_MASK 0xe0000000
#define SXE_TX_FEATURE_VLAN_PRIO_SHIFT 29
#define SXE_TX_FEATURE_VLAN_SHIFT 16
enum sxe_tx_features {
SXE_TX_FEATURE_HW_VLAN = 0x01,
SXE_TX_FEATURE_TSO = 0x02,
SXE_TX_FEATURE_TSTAMP = 0x04,
SXE_TX_FEATURE_CC = 0x08,
SXE_TX_FEATURE_IPV4 = 0x10,
SXE_TX_FEATURE_CSUM = 0x20,
SXE_TX_FEATURE_IPSEC = 0x40,
SXE_TX_FEATURE_SW_VLAN = 0x80,
};
#define SXE_TX_SET_FLAG(input, flag, result) \
({ \
u32 _input = input; \
u32 _flag = flag; \
u32 _result = result; \
(((_flag) <= (_result)) ? \
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result)))); \
})
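/* SXE_TX_SET_FLAG() moves the bit selected by 'flag' in 'input' to
 * the bit position of 'result' by scaling; the ternary folds to a
 * single multiply or divide when both masks are constants.
 */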
union sxe_ip_hdr {
struct iphdr *v4;
struct ipv6hdr *v6;
u8 *hdr;
};
union sxe_l4_hdr {
struct tcphdr *tcp;
u8 *hdr;
};
union app_tr_data_hdr {
u8 *network;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
};
int sxe_tx_configure(struct sxe_adapter *adapter);
void sxe_tx_ring_buffer_clean(struct sxe_ring *ring);
bool sxe_tx_ring_irq_clean(struct sxe_irq_data *irq, struct sxe_ring *ring,
s32 napi_budget);
netdev_tx_t sxe_xmit(struct sk_buff *skb, struct net_device *netdev);
s32 sxe_tx_ring_alloc(struct sxe_ring *ring);
void sxe_tx_ring_free(struct sxe_ring *ring);
void sxe_tx_resources_free(struct sxe_adapter *adapter);
s32 sxe_tx_ring_depth_reset(struct sxe_adapter *adapter, u32 tx_cnt);
void sxe_hw_tx_disable(struct sxe_adapter *adapter);
void sxe_hw_tx_configure(struct sxe_adapter *adapter);
#ifdef HAVE_TIMEOUT_TXQUEUE_IDX
void sxe_tx_timeout(struct net_device *netdev, u32 __always_unused txqueue);
#else
void sxe_tx_timeout(struct net_device *netdev);
#endif
bool sxe_tx_ring_pending(struct sxe_adapter *adapter);
void sxe_tx_buffer_dump(struct sxe_adapter *adapter);
void sxe_tx_ring_attr_configure(struct sxe_adapter *adapter,
struct sxe_ring *ring);
void sxe_tx_ring_reg_configure(struct sxe_adapter *adapter,
struct sxe_ring *ring);
netdev_tx_t sxe_ring_xmit(struct sk_buff *skb, struct net_device *netdev,
struct sxe_ring *ring);
s32 sxe_test_tx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring);
static inline void sxe_tx_buffer_init(struct sxe_ring *ring)
{
memset(ring->tx_buffer_info, 0,
sizeof(struct sxe_tx_buffer) * ring->depth);
}
static inline void
sxe_ctxt_desc_iplen_set(struct sxe_tx_context_desc *ctxt_desc, u32 iplen)
{
ctxt_desc->vlan_macip_lens |= iplen;
}
static inline void
sxe_ctxt_desc_maclen_set(struct sxe_tx_context_desc *ctxt_desc, u32 maclen)
{
ctxt_desc->vlan_macip_lens &= ~SXE_TX_CTXTD_MACLEN_MASK;
ctxt_desc->vlan_macip_lens |= maclen << SXE_TX_CTXTD_MACLEN_SHIFT;
}
static inline void
sxe_ctxt_desc_vlan_tag_set(struct sxe_tx_context_desc *ctxt_desc, u32 vlan_tag)
{
ctxt_desc->vlan_macip_lens |= vlan_tag << SXE_TX_CTXTD_VLAN_SHIFT;
}
static inline void
sxe_ctxt_desc_tucmd_set(struct sxe_tx_context_desc *ctxt_desc, u32 tucmd)
{
ctxt_desc->type_tucmd_mlhl |= tucmd;
}
static inline void
sxe_ctxt_desc_sa_idx_set(struct sxe_tx_context_desc *ctxt_desc, u32 sa_idx)
{
ctxt_desc->sa_idx = sa_idx;
}
static inline void
sxe_ctxt_desc_mss_l4len_set(struct sxe_tx_context_desc *ctxt_desc,
u32 mss_l4len)
{
ctxt_desc->mss_l4len_idx = mss_l4len;
}
static inline __be16
sxe_ctxt_desc_vlan_tag_get(struct sxe_tx_context_desc *ctxt_desc)
{
return (ctxt_desc->vlan_macip_lens >> SXE_TX_CTXTD_VLAN_SHIFT);
}
static inline void sxe_tx_release(struct sxe_adapter *adapter)
{
sxe_tx_resources_free(adapter);
}
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,114 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_xdp.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_XDP_H__
#define __SXE_XDP_H__
#include "sxe.h"
#ifdef HAVE_XDP_SUPPORT
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#endif
#ifndef HAVE_NO_XDP_BUFF_RXQ
#include <net/xdp.h>
#endif
#define SXE_XDP_PASS (0)
#define SXE_XDP_CONSUMED BIT(0)
#define SXE_XDP_TX BIT(1)
#define SXE_XDP_REDIR BIT(2)
#ifdef HAVE_AF_XDP_ZERO_COPY
#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL
#include <net/xdp_sock_drv.h>
#else
#include <net/xdp_sock.h>
#endif
static inline bool sxe_xdp_adapter_enabled(struct sxe_adapter *adapter)
{
return !!adapter->xdp_prog;
}
#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL
bool sxe_xdp_tx_ring_irq_clean(struct sxe_irq_data *irq_data,
struct sxe_ring *tx_ring, int napi_budget);
int sxe_zc_rx_ring_irq_clean(struct sxe_irq_data *irq_data,
struct sxe_ring *rx_ring, const int budget);
int sxe_xsk_async_xmit(struct net_device *dev, u32 qid);
void sxe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle_addr);
void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring);
void sxe_xsk_tx_ring_clean(struct sxe_ring *tx_ring);
void sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 count);
#else
bool sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 count);
s32 sxe_zc_rx_ring_irq_clean(struct sxe_irq_data *irq_data,
struct sxe_ring *rx_ring, const int budget);
void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring);
bool sxe_xdp_tx_ring_irq_clean(struct sxe_irq_data *irq_data,
struct sxe_ring *tx_ring, int napi_budget);
void sxe_xsk_tx_ring_clean(struct sxe_ring *tx_ring);
#endif
#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL
struct xsk_buff_pool *sxe_xsk_pool_get(struct sxe_adapter *adapter,
struct sxe_ring *ring);
#else
struct xdp_umem *sxe_xsk_pool_get(struct sxe_adapter *adapter,
struct sxe_ring *ring);
#endif
#ifdef HAVE_NDO_XSK_WAKEUP
int sxe_xsk_wakeup(struct net_device *dev, u32 qid, u32 __maybe_unused flags);
#endif
#endif
#ifdef HAVE_XDP_SUPPORT
DECLARE_STATIC_KEY_FALSE(sxe_xdp_tx_lock_key);
static inline struct sxe_ring *sxe_xdp_tx_ring_pick(struct sxe_adapter *adapter)
{
s32 cpu = smp_processor_id();
u16 idx = static_key_enabled(&sxe_xdp_tx_lock_key) ?
cpu % SXE_XDP_RING_NUM_MAX :
cpu;
return adapter->xdp_ring_ctxt.ring[idx];
}
void sxe_xdp_ring_tail_update_locked(struct sxe_ring *ring);
int sxe_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int sxe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
u32 sxe_max_xdp_frame_size(struct sxe_adapter *adapter);
#endif
struct sk_buff *sxe_xdp_run(struct sxe_adapter *adapter,
struct sxe_ring *rx_ring, struct xdp_buff *xdp);
#endif

View File

@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
#
# sxevf network device configuration
#
config SXE_VF
tristate "sxevf PCI Express adapters support"
depends on (X86 || ARM64) && PCI
select MDIO
select PHYLIB
select PTP_1588_CLOCK
default m
help
This driver supports the sxevf PCI Express family of adapters.
To compile this driver as a module, choose M here. The module
will be called sxevf.

View File

@ -0,0 +1,68 @@
# SPDX-License-Identifier: GPL-2.0
CONFIG_MODULE_SIG=n
# Makefile path
MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST)))
# current path
CURDIR :=$(shell dirname $(MAKEPATH))
ifneq ($(KERNELRELEASE),)
# compile
CONFIG_SXE_VF ?= m
obj-$(CONFIG_SXE_VF) += sxevf.o
sxevf-objs += base/log/sxe_log.o \
base/trace/sxe_trace.o \
sxevf/sxevf_csum.o \
sxevf/sxevf_debug.o \
sxevf/sxevf_ethtool.o \
sxevf/sxevf_hw.o \
sxevf/sxevf_ipsec.o \
sxevf/sxevf_irq.o \
sxevf/sxevf_main.o \
sxevf/sxevf_monitor.o \
sxevf/sxevf_msg.o \
sxevf/sxevf_netdev.o \
sxevf/sxevf_ring.o \
sxevf/sxevf_rx_proc.o \
sxevf/sxevf_tx_proc.o \
sxevf/sxevf_xdp.o
# add compile ccflags and macro
ccflags-y += -Werror
ccflags-y += -I$(CURDIR)/sxevf
ccflags-y += -I$(CURDIR)/include/sxe
ccflags-y += -I$(CURDIR)/include
ccflags-y += -I$(CURDIR)/base/compat
ccflags-y += -I$(CURDIR)/base/trace
ccflags-y += -I$(CURDIR)/base/log
ccflags-y += -DSXE_HOST_DRIVER
ccflags-y += -DSXE_DRIVER_RELEASE
ccflags-$(CONFIG_DCB) += -DSXE_DCB_CONFIGURE
os_type = $(shell sed -n '/^ID=/p' /etc/os-release | awk -F '=' '{print $$2}' | sed 's/\"//g' | sed 's/ID=//g')
ifeq (${os_type}, opencloudos)
ccflags-y += -DOPENCLOUDOS
endif
# get linux kernel version code
ifneq ($(wildcard $(CURDIR)/vercode_build.sh),)
KER_DIR=$(srctree)
SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR))
ifneq ($(SPECIFIC_LINUX),)
ccflags-y += -DSPECIFIC_LINUX
ccflags-y += -D$(SPECIFIC_LINUX)
endif
endif
else # KERNELRELEASE
# kernel build path
KDIR := /lib/modules/$(shell uname -r)/build
all:
@$(MAKE) -C $(KDIR) M=$(CURDIR) modules
clean:
@rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt
@rm -rf ./sxevf/*.o ./sxevf/.*.cmd
@rm -rf ./base/log/*.o ./base/trace/*.o
endif # KERNELRELEASE

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_compat.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_COMPAT_H__
#define __SXE_COMPAT_H__
#include "sxe_compat_gcc.h"
#include <linux/filter.h>
#include <linux/version.h>
#define HAVE_XDP_SUPPORT
#define HAVE_AF_XDP_ZERO_COPY
#define HAVE_MEM_TYPE_XSK_BUFF_POOL
#define HAVE_XDP_BUFF_DATA_META
#define HAVE_XDP_BUFF_FRAME_SIZE
#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS
#define XDP_XMIT_FRAME_FAILED_NEED_FREE
#define HAVE_NETDEV_BPF_XSK_BUFF_POOL
#define HAVE_SKB_XMIT_MORE
#define HAVE_TIMEOUT_TXQUEUE_IDX
#define HAVE_NETDEV_NESTED_PRIV
#define HAVE_NET_PREFETCH_API
#define HAVE_NDO_FDB_ADD_EXTACK
#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
#define HAVE_NDO_SET_VF_LINK_STATE
#define HAVE_NDO_XSK_WAKEUP
#define HAVE_MACVLAN_OFFLOAD_SUPPORT
#define HAVE_PTP_CLOCK_INFO_ADJFINE
#define SXE_LOG_OLD_FS
#define SXE_LOG_FS_NOTIFY
#endif

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_compat_gcc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_COMPAT_GCC_H__
#define __SXE_COMPAT_GCC_H__
#ifdef __has_attribute
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough \
do { \
} while (0)
#endif
#else
#define fallthrough \
do { \
} while (0)
#endif
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,200 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_log.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef _SXE_LOG_H_
#define _SXE_LOG_H_
#include <linux/stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define LOG_INFO_PREFIX_LEN 32
#define LOG_ERROR_PREFIX_LEN 33
#define MEGABYTE 20
enum debug_level {
LEVEL_ERROR,
LEVEL_WARN,
LEVEL_INFO,
LEVEL_DEBUG,
};
static inline const s8 *sxe_debug_level_name(enum debug_level lv)
{
static const s8 *level[] = {
[LEVEL_ERROR] = "ERROR",
[LEVEL_WARN] = "WARN",
[LEVEL_INFO] = "INFO",
[LEVEL_DEBUG] = "DEBUG",
};
return level[lv];
}
#define LOG_BUG_ON(cond, fmt, ...) \
do { \
if ((cond)) { \
LOG_ERROR(fmt, ##__VA_ARGS__); \
LOG_SYNC(); \
BUG(); \
} \
} while (0)
#define DEBUG_TRACE_MAGIC 0x456789
#define BUF_SIZE (1024LL << 10)
#define PAGE_ORDER 2
#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << PAGE_ORDER))
#define LOG_FILE_SIZE (200LL << 20)
#define BINARY_FILE_SIZE (200LL << 20)
#define VF_LOG_FILE_PATH "/var/log/sxevf.log"
#define VF_LOG_FILE_PREFIX "sxevf.log"
#define VF_BINARY_FILE_PATH "/var/log/sxevf.bin"
#define VF_BINARY_FILE_PREFIX "sxevf.bin"
#define LOG_FILE_PATH "/var/log/sxe.log"
#define LOG_FILE_PREFIX "sxe.log"
#define BINARY_FILE_PATH "/var/log/sxe.bin"
#define BINARY_FILE_PREFIX "sxe.bin"
#define DEBUG_DROP_LOG_STRING "\nwarning: dropped some logs\n\n"
enum {
DEBUG_TYPE_STRING,
DEBUG_TYPE_BINARY,
DEBUG_TYPE_NR,
};
struct debug_func {
struct list_head list;
char name[64];
};
struct debug_file {
struct list_head list;
char name[64];
};
struct sxe_log {
struct {
char *buf;
int buf_size;
long long head;
long long tail;
/* in order to protect the data */
spinlock_t lock;
unsigned char is_drop;
};
struct {
char *file_path;
struct file *file;
long long file_pos;
long long file_size;
u32 file_num;
u32 index;
};
};
struct sxe_thread_local {
s32 magic;
char data[0];
};
struct sxe_ctxt {
struct page *page;
void *buff;
};
struct sxe_thread_key {
s32 offset;
};
struct sxe_debug {
enum debug_level level;
bool status;
u16 key_offset;
struct sxe_ctxt __percpu *ctxt;
struct list_head filter_func;
struct list_head filter_file;
struct task_struct *task;
struct sxe_log log[DEBUG_TYPE_NR];
};
void sxe_level_set(int level);
s32 sxe_level_get(void);
void sxe_bin_status_set(bool status);
s32 sxe_bin_status_get(void);
int sxe_log_init(bool is_vf);
void sxe_log_exit(void);
void sxe_log_binary(const char *file, const char *func, int line, u8 *ptr,
u64 addr, u32 size, char *str);
void sxe_log_sync(void);
#define LOG_DEBUG(fmt, ...)
#define LOG_INFO(fmt, ...)
#define LOG_WARN(fmt, ...)
#define LOG_ERROR(fmt, ...)
#define UNUSED(x) ((void)(x))
#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter)
#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter)
#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter)
#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter)
#define LOG_DEV_DEBUG(format, arg...) \
dev_dbg(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_INFO(format, arg...) \
dev_info(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_WARN(format, arg...) \
dev_warn(&adapter->pdev->dev, format, ##arg)
#define LOG_DEV_ERR(format, arg...) dev_err(&adapter->pdev->dev, format, ##arg)
#define LOG_MSG_DEBUG(msglvl, format, arg...) \
netif_dbg(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_INFO(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_WARN(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_MSG_ERR(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ##arg)
#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: " format, ##arg)
#define LOG_PR_INFO(format, arg...) pr_info("sxe: " format, ##arg)
#define LOG_PR_WARN(format, arg...) pr_warn("sxe: " format, ##arg)
#define LOG_PR_ERR(format, arg...) pr_err("sxe: " format, ##arg)
#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: " format, ##arg)
#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: " format, ##arg)
#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: " format, ##arg)
#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: " format, ##arg)
#define LOG_SYNC()
#define SXE_BUG()
#define SXE_BUG_NO_SYNC()
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,251 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_trace.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifdef SXE_DRIVER_TRACE
#include <linux/device.h>
#include <linux/netdevice.h>
#include "sxe_trace.h"
#include "sxe_ring.h"
#include "sxe_log.h"
#define SXE_FILE_NAME_LEN (256)
#define SXE_TRACE_NS_MASK (0xFFFFFFFF)
#define SXE_TRACE_BUF_CLEAN(buf, buf_size, len) \
do { \
memset(buf, 0, buf_size); \
len = 0; \
} while (0)
struct sxe_trace_tx_ring g_sxe_trace_tx[SXE_TXRX_RING_NUM_MAX] = { { 0 } };
struct sxe_trace_rx_ring g_sxe_trace_rx[SXE_TXRX_RING_NUM_MAX] = { { 0 } };
void sxe_file_close(struct file **file)
{
filp_close(*file, NULL);
*file = NULL;
}
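/* Record a timestamp for stage 'lab' of the current tx trace slot;
 * writing label 0 (SXE_TRACE_LAB_TX_START) advances the per-ring
 * circular buffer and clears the new slot first.
 */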
void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab)
{
if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) ||
unlikely(lab >= SXE_TRACE_LAB_TX_MAX))
return;
if (unlikely(lab == 0)) {
g_sxe_trace_tx[ring_idx].next++;
g_sxe_trace_tx[ring_idx].next &= SXE_TRACE_PER_RING_MASK;
memset(&g_sxe_trace_tx[ring_idx]
.timestamp[g_sxe_trace_tx[ring_idx].next],
0, sizeof(g_sxe_trace_tx[ring_idx].timestamp[0]));
}
g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next][lab] =
ktime_get_real_ns() & SXE_TRACE_NS_MASK;
}
void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab)
{
if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) ||
unlikely(lab >= SXE_TRACE_LAB_RX_MAX))
return;
if (unlikely(lab == 0)) {
g_sxe_trace_rx[ring_idx].next++;
g_sxe_trace_rx[ring_idx].next &= SXE_TRACE_PER_RING_MASK;
memset(&g_sxe_trace_rx[ring_idx]
.timestamp[g_sxe_trace_rx[ring_idx].next],
0, sizeof(g_sxe_trace_rx[ring_idx].timestamp[0]));
}
g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next][lab] =
ktime_get_real_ns() & SXE_TRACE_NS_MASK;
}
static int sxe_trace_create_file(struct file **pp_file)
{
char file_name[SXE_FILE_NAME_LEN] = {};
int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE;
int len = 0;
int rc = 0;
struct file *file;
len += snprintf(file_name, sizeof(file_name), "%s.",
SXE_TRACE_DUMP_FILE_NAME);
time_for_file_name(file_name + len, sizeof(file_name) - len);
file = filp_open(file_name, flags_new, 0666);
if (IS_ERR(file)) {
rc = (int)PTR_ERR(file);
sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n",
file_name, rc);
goto l_out;
}
*pp_file = file;
l_out:
return rc;
}
static int sxe_trace_write_file(struct file *file)
{
char *buff;
size_t buff_size = 2048;
int rc = 0;
int len = 0;
u64 spend = 0;
u64 times = 0;
u64 spend_total = 0;
u64 times_total = 0;
u64 start;
u64 end;
u32 i;
u32 j;
u32 k;
buff = kzalloc(buff_size, GFP_KERNEL);
if (!buff) {
rc = -ENOMEM;
sxe_print(KERN_ERR, NULL, "kzalloc %lu failed.\n", buff_size);
goto l_out;
}
len += snprintf(buff + len, buff_size - len, "tx trace dump:\n");
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
for (i = 0; i < ARRAY_SIZE(g_sxe_trace_tx); i++) {
spend = 0;
times = 0;
for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) {
start = g_sxe_trace_tx[i]
.timestamp[j][SXE_TRACE_LAB_TX_START];
end = g_sxe_trace_tx[i]
.timestamp[j][SXE_TRACE_LAB_TX_END];
if (start == 0 || end == 0)
continue;
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"\ttx ring %d trace %d dump:", i, j);
for (k = 0; k < SXE_TRACE_LAB_TX_MAX; k++) {
len += snprintf(buff + len, buff_size - len,
"%llu ", g_sxe_trace_tx[i].timestamp[j][k]);
}
len += snprintf(buff + len, buff_size - len,
"spend: %llu\n", end - start);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend += end - start;
times++;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"tx ring %d, spend %llu, times:%llu.\n", i,
spend, times);
spend_total += spend;
times_total += times;
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"tx trace dump, spend_total: %llu, times_total: %llu.\n",
spend_total, times_total);
len += snprintf(buff + len, buff_size - len, "rx trace dump:\n");
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend_total = 0;
times_total = 0;
for (i = 0; i < ARRAY_SIZE(g_sxe_trace_rx); i++) {
spend = 0;
times = 0;
for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) {
start = g_sxe_trace_rx[i]
.timestamp[j][SXE_TRACE_LAB_RX_START];
end = g_sxe_trace_rx[i]
.timestamp[j][SXE_TRACE_LAB_RX_END];
if (start == 0 || end == 0)
continue;
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"\trx ring %d trace %d dump:", i, j);
for (k = 0; k < SXE_TRACE_LAB_RX_MAX; k++) {
len += snprintf(buff + len, buff_size - len,
"%llu ", g_sxe_trace_rx[i].timestamp[j][k]);
}
len += snprintf(buff + len, buff_size - len,
"spend: %llu\n", end - start);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
spend += end - start;
times++;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"rx ring %d, spend %llu, times:%llu:\n", i,
spend, times);
spend_total += spend;
times_total += times;
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
}
SXE_TRACE_BUF_CLEAN(buff, buff_size, len);
len += snprintf(buff + len, buff_size - len,
"rx trace dump, spend_total: %llu, times_total: %llu.\n",
spend_total, times_total);
rc = sxe_file_write(file, buff, len);
if (rc < 0)
goto l_out;
l_out:
kfree(buff);
if (rc < 0)
sxe_print(KERN_ERR, NULL, "write file error %d\n", rc);
return rc;
}
void sxe_trace_dump(void)
{
struct file *file = NULL;
int rc;
rc = sxe_trace_create_file(&file);
if (rc < 0 || !file)
goto l_out;
rc = sxe_trace_write_file(file);
if (rc < 0)
goto l_out;
l_out:
if (file)
sxe_file_close(&file);
}
#endif

View File

@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_trace.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_TRACE_H__
#define __SXE_TRACE_H__
#ifdef SXE_DRIVER_TRACE
#define SXE_TRACE_NUM_PER_RING (2048)
#define SXE_TRACE_PER_RING_MASK (0x7FF)
#ifndef SXE_TEST
#define SXE_TRACE_DUMP_FILE_NAME ("/var/log/sxe_trace_dump.log")
#else
#define SXE_TRACE_DUMP_FILE_NAME (".sxe_trace_dump.log")
#endif
enum sxe_trace_lab_tx {
SXE_TRACE_LAB_TX_START = 0,
SXE_TRACE_LAB_TX_MAY_STOP,
SXE_TRACE_LAB_TX_VLAN,
SXE_TRACE_LAB_TX_DCB,
SXE_TRACE_LAB_TX_IPSEC,
SXE_TRACE_LAB_TX_TSO,
SXE_TRACE_LAB_TX_DESC,
SXE_TRACE_LAB_TX_PPT,
SXE_TRACE_LAB_TX_FDIR,
SXE_TRACE_LAB_TX_OL_INFO,
SXE_TRACE_LAB_TX_MAP,
SXE_TRACE_LAB_TX_SENT,
SXE_TRACE_LAB_TX_UPDATE,
SXE_TRACE_LAB_TX_MAY_STOP_2,
SXE_TRACE_LAB_TX_WRITE,
SXE_TRACE_LAB_TX_END,
SXE_TRACE_LAB_TX_MAX,
};
struct sxe_trace_tx_ring {
u64 next;
u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_TX_MAX];
};
enum sxe_trace_lab_rx {
SXE_TRACE_LAB_RX_START = 0,
SXE_TRACE_LAB_RX_CLEAN,
SXE_TRACE_LAB_RX_UNMAP,
SXE_TRACE_LAB_RX_STATS,
SXE_TRACE_LAB_RX_HANG,
SXE_TRACE_LAB_RX_DONE,
SXE_TRACE_LAB_RX_WAKE,
SXE_TRACE_LAB_RX_END,
SXE_TRACE_LAB_RX_MAX,
};
struct sxe_trace_rx_ring {
u64 next;
u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_RX_MAX];
};
void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab);
void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab);
void sxe_trace_dump(void);
#define SXE_TRACE_TX(r_idx, lab) sxe_trace_tx_add(r_idx, lab)
#define SXE_TRACE_RX(r_idx, lab) sxe_trace_rx_add(r_idx, lab)
#else
#define SXE_TRACE_TX(r_idx, lab)
#define SXE_TRACE_RX(r_idx, lab)
#endif
#endif

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: drv_msg.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __DRV_MSG_H__
#define __DRV_MSG_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_VERSION_LEN 32
struct sxe_version_resp {
u8 fw_version[SXE_VERSION_LEN];
};
#endif

View File

@ -0,0 +1,224 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_cli.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_CLI_H__
#define __SXE_CLI_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_VERION_LEN (32)
#define SXE_MAC_NUM (128)
#define SXE_PORT_TRANSCEIVER_LEN (32)
#define SXE_PORT_VENDOR_LEN (32)
#define SXE_CHIP_TYPE_LEN (32)
#define SXE_VPD_SN_LEN (16)
#define SXE_SOC_RST_TIME (0x93A80)
#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)
#define MGC_TERMLOG_INFO_MAX_LEN (12 * 1024)
#define SXE_REGS_DUMP_MAX_LEN (12 * 1024)
#define SXE_PRODUCT_NAME_LEN (32)
enum sxe_led_mode {
SXE_IDENTIFY_LED_BLINK_ON = 0,
SXE_IDENTIFY_LED_BLINK_OFF,
SXE_IDENTIFY_LED_ON,
SXE_IDENTIFY_LED_OFF,
SXE_IDENTIFY_LED_RESET,
};
struct sxe_led_ctrl {
u32 mode;
u32 duration;
};
struct sxe_led_ctrl_resp {
u32 ack;
};
enum portlinkspeed {
PORT_LINK_NO = 0,
PORT_LINK_100M = 1,
PORT_LINK_1G = 2,
PORT_LINK_10G = 3,
};
struct syssocinfo {
s8 fwver[SXE_VERION_LEN];
s8 optver[SXE_VERION_LEN];
u8 socstatus;
u8 pad[3];
s32 soctemp;
u64 chipid;
s8 chiptype[SXE_CHIP_TYPE_LEN];
s8 pba[SXE_VPD_SN_LEN];
s8 productname[SXE_PRODUCT_NAME_LEN];
};
struct sysportinfo {
u64 mac[SXE_MAC_NUM];
u8 isportabs;
u8 linkstat;
u8 linkspeed;
u8 issfp : 1;
u8 isgetinfo : 1;
u8 rvd : 6;
s8 opticalmodtemp;
u8 pad[3];
s8 transceivertype[SXE_PORT_TRANSCEIVER_LEN];
s8 vendorname[SXE_PORT_VENDOR_LEN];
s8 vendorpn[SXE_PORT_VENDOR_LEN];
};
struct sysinforesp {
struct syssocinfo socinfo;
struct sysportinfo portinfo;
};
enum sfptemptdmode {
SFP_TEMP_THRESHOLD_MODE_ALARM = 0,
SFP_TEMP_THRESHOLD_MODE_WARN,
};
struct sfptemptdset {
u8 mode;
u8 pad[3];
s8 hthreshold;
s8 lthreshold;
};
struct sxelogexportresp {
u16 curloglen;
u8 isend;
u8 pad;
s32 sessionid;
s8 data[0];
};
enum sxelogexporttype {
SXE_LOG_EXPORT_REQ = 0,
SXE_LOG_EXPORT_FIN,
SXE_LOG_EXPORT_ABORT,
};
struct sxelogexportreq {
u8 isalllog;
u8 cmdtype;
u8 isbegin;
u8 pad;
s32 sessionid;
u32 loglen;
};
struct socrstreq {
u32 time;
};
struct regsdumpresp {
u32 curdwlen;
u8 data[0];
};
enum {
SXE_MFG_PART_NUMBER_LEN = 8,
SXE_MFG_SERIAL_NUMBER_LEN = 16,
SXE_MFG_REVISION_LEN = 4,
SXE_MFG_OEM_STR_LEN = 64,
SXE_MFG_SXE_BOARD_ASSEMBLY_LEN = 32,
SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
SXE_MFG_SXE_MAC_ADDR_CNT = 2,
};
struct sxemfginfo {
u8 partnumber[SXE_MFG_PART_NUMBER_LEN];
u8 serialnumber[SXE_MFG_SERIAL_NUMBER_LEN];
u32 mfgdate;
u8 revision[SXE_MFG_REVISION_LEN];
u32 reworkdate;
u8 pad[4];
u64 macaddr[SXE_MFG_SXE_MAC_ADDR_CNT];
u8 boardtracenum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN];
u8 boardassembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];
u8 extra1[SXE_MFG_OEM_STR_LEN];
u8 extra2[SXE_MFG_OEM_STR_LEN];
};
struct sxelldpinfo {
u8 lldpstate;
u8 pad[3];
};
struct regsdumpreq {
u32 baseaddr;
u32 dwlen;
};
enum sxe_pcs_mode {
SXE_PCS_MODE_1000BASE_KX_WO = 0,
SXE_PCS_MODE_1000BASE_KX_W,
SXE_PCS_MODE_SGMII,
SXE_PCS_MODE_10GBASE_KR_WO,
SXE_PCS_MODE_AUTO_NEGT_73,
SXE_PCS_MODE_LPBK_PHY_TX2RX,
SXE_PCS_MODE_LPBK_PHY_RX2TX,
SXE_PCS_MODE_LPBK_PCS_RX2TX,
SXE_PCS_MODE_BUTT,
};
enum sxe_remote_fault {
SXE_REMOTE_FALUT_NO_ERROR = 0,
SXE_REMOTE_FALUT_OFFLINE,
SXE_REMOTE_FALUT_LINK_FAILURE,
SXE_REMOTE_FALUT_AUTO_NEGOTIATION,
SXE_REMOTE_UNKNOWN,
};
struct sxe_phy_cfg {
enum sxe_pcs_mode mode;
u32 mtu;
};
enum sxe_an_speed {
SXE_AN_SPEED_NO_LINK = 0,
SXE_AN_SPEED_100M,
SXE_AN_SPEED_1G,
SXE_AN_SPEED_10G,
SXE_AN_SPEED_UNKNOWN,
};
enum sxe_phy_pause_cap {
SXE_PAUSE_CAP_NO_PAUSE = 0,
SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,
SXE_PAUSE_CAP_SYMMETRIC_PAUSE,
SXE_PAUSE_CAP_BOTH_PAUSE,
SXE_PAUSE_CAP_UNKNOWN,
};
enum sxe_phy_duplex_type {
SXE_FULL_DUPLEX = 0,
SXE_HALF_DUPLEX = 1,
SXE_UNKNOWN_DUPLEX,
};
struct sxe_phy_an_cap {
enum sxe_remote_fault remote_fault;
enum sxe_phy_pause_cap pause_cap;
enum sxe_phy_duplex_type duplex_cap;
};
struct sxe_an_cap {
struct sxe_phy_an_cap local;
struct sxe_phy_an_cap peer;
};
#endif

View File

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_hdc.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_HDC_H__
#define __SXE_HDC_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define HDC_CACHE_TOTAL_LEN (16 * 1024)
#define ONE_PACKET_LEN_MAX (1024)
#define DWORD_NUM (256)
#define HDC_TRANS_RETRY_COUNT (3)
enum sxehdcerrnocode {
PKG_OK = 0,
PKG_ERR_REQ_LEN,
PKG_ERR_RESP_LEN,
PKG_ERR_PKG_SKIP,
PKG_ERR_NODATA,
PKG_ERR_PF_LK,
PKG_ERR_OTHER,
};
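/* One HDC packet header packed into a single dword: 4-bit packet
 * id, 4-bit error code, 8-bit packet length, start/end/read/msi
 * flags and a 12-bit total transfer length.
 */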
union hdcheader {
struct {
u8 pid : 4;
u8 errcode : 4;
u8 len;
u16 startpkg : 1;
u16 endpkg : 1;
u16 isrd : 1;
u16 msi : 1;
u16 totallen : 12;
} head;
u32 dw0;
};
#endif

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_ioctl.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef _SXE_IOCTL_H_
#define _SXE_IOCTL_H_
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
struct sxeioctlsynccmd {
u64 traceid;
void *indata;
u32 inlen;
void *outdata;
u32 outlen;
};
#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxeioctlsynccmd)
#endif

View File

@ -0,0 +1,134 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_msg.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_MSG_H__
#define __SXE_MSG_H__
#ifdef SXE_HOST_DRIVER
#include <linux/types.h>
#endif
#define SXE_MAC_ADDR_LEN 6
#define SXE_HDC_CMD_HDR_SIZE sizeof(struct sxe_hdc_cmd_hdr)
#define SXE_HDC_MSG_HDR_SIZE sizeof(struct sxe_hdc_drv_cmd_msg)
enum sxe_cmd_type {
SXE_CMD_TYPE_CLI,
SXE_CMD_TYPE_DRV,
SXE_CMD_TYPE_UNKNOWN,
};
struct sxe_hdc_cmd_hdr {
u8 cmd_type;
u8 cmd_sub_type;
u8 reserve[6];
};
enum sxefwstate {
SXE_FW_START_STATE_UNDEFINED = 0x00,
SXE_FW_START_STATE_INIT_BASE = 0x10,
SXE_FW_START_STATE_SCAN_DEVICE = 0x20,
SXE_FW_START_STATE_FINISHED = 0x30,
SXE_FW_START_STATE_UPGRADE = 0x31,
SXE_FW_RUNNING_STATE_ABNOMAL = 0x40,
SXE_FW_START_STATE_MASK = 0xF0,
};
struct sxefwstateinfo {
u8 socstatus;
char statbuff[32];
};
enum msievt {
MSI_EVT_SOC_STATUS = 0x1,
MSI_EVT_HDC_FWOV = 0x2,
MSI_EVT_HDC_TIME_SYNC = 0x4,
MSI_EVT_MAX = 0x80000000,
};
enum sxefwhdcstate {
SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
SXE_FW_HDC_TRANSACTION_BUSY,
SXE_FW_HDC_TRANSACTION_ERR,
};
enum sxe_hdc_cmd_opcode {
SXE_CMD_SET_WOL = 1,
SXE_CMD_LED_CTRL,
SXE_CMD_SFP_READ,
SXE_CMD_SFP_WRITE,
SXE_CMD_TX_DIS_CTRL = 5,
SXE_CMD_TINE_SYNC,
SXE_CMD_RATE_SELECT,
SXE_CMD_R0_MAC_GET,
SXE_CMD_LOG_EXPORT,
SXE_CMD_FW_VER_GET = 10,
SXE_CMD_PCS_SDS_INIT,
SXE_CMD_AN_SPEED_GET,
SXE_CMD_AN_CAP_GET,
SXE_CMD_GET_SOC_INFO,
SXE_CMD_MNG_RST = 15,
SXE_CMD_MAX,
};
enum sxe_hdc_cmd_errcode {
SXE_ERR_INVALID_PARAM = 1,
};
struct sxe_hdc_drv_cmd_msg {
u16 opcode;
u16 errcode;
union datalength {
u16 req_len;
u16 ack_len;
} length;
u8 reserve[8];
u64 traceid;
u8 body[0];
};
struct sxe_sfp_rw_req {
u16 offset;
u16 len;
u8 write_data[0];
};
struct sxe_sfp_read_resp {
u16 len;
u8 resp[0];
};
enum sxe_sfp_rate {
SXE_SFP_RATE_1G = 0,
SXE_SFP_RATE_10G = 1,
};
struct sxe_sfp_rate_able {
enum sxe_sfp_rate rate;
};
struct sxe_spp_tx_able {
bool isdisable;
};
struct sxe_default_mac_addr_resp {
u8 addr[SXE_MAC_ADDR_LEN];
};
struct sxe_mng_rst {
bool enable;
};
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_version.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_VER_H__
#define __SXE_VER_H__
#define SXE_VERSION "1.5.0.30"
#define SXE_COMMIT_ID "cd8fdce"
#define SXE_BRANCH "develop/rc/sagitta-1.5.0_B030-Anolis"
#define SXE_BUILD_TIME "2025-05-13 20:33:58"
#define SXE_DRV_NAME "sxe"
#define SXEVF_DRV_NAME "sxevf"
#define SXE_DRV_LICENSE "GPL v2"
#define SXE_DRV_AUTHOR "sxe"
#define SXEVF_DRV_AUTHOR "sxevf"
#define SXE_DRV_DESCRIPTION "sxe driver"
#define SXEVF_DRV_DESCRIPTION "sxevf driver"
#define SXE_FW_NAME "soc"
#define SXE_FW_ARCH "arm32"
#ifndef PS3_CFG_RELEASE
#define PS3_SXE_FW_BUILD_MODE "debug"
#else
#define PS3_SXE_FW_BUILD_MODE "release"
#endif
#endif

View File

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxe_errno.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_ERRNO_H__
#define __SXE_ERRNO_H__
#define SXE_ERR_MODULE_STANDARD 0
#define SXE_ERR_MODULE_PF 1
#define SXE_ERR_MODULE_VF 2
#define SXE_ERR_MODULE_HDC 3
#define SXE_ERR_MODULE_OFFSET 16
#define SXE_ERR_MODULE(module, errcode) \
(((module) << SXE_ERR_MODULE_OFFSET) | (errcode))
#define SXE_ERR_PF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode)
#define SXE_ERR_VF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
#define SXE_ERR_HDC(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
#define SXE_ERR_CONFIG EINVAL
#define SXE_ERR_PARAM EINVAL
#define SXE_ERR_RESET_FAILED EPERM
#define SXE_ERR_NO_SPACE ENOSPC
#define SXE_ERR_FNAV_CMD_INCOMPLETE EBUSY
#define SXE_ERR_MBX_LOCK_FAIL EBUSY
#define SXE_ERR_OPRATION_NOT_PERM EPERM
#define SXE_ERR_LINK_STATUS_INVALID EINVAL
#define SXE_ERR_LINK_SPEED_INVALID EINVAL
#define SXE_ERR_DEVICE_NOT_SUPPORTED EOPNOTSUPP
#define SXE_ERR_HDC_LOCK_BUSY EBUSY
#define SXE_ERR_HDC_FW_OV_TIMEOUT ETIMEDOUT
#define SXE_ERR_MDIO_CMD_TIMEOUT ETIMEDOUT
#define SXE_ERR_INVALID_LINK_SETTINGS EINVAL
#define SXE_ERR_FNAV_REINIT_FAILED EIO
#define SXE_ERR_CLI_FAILED EIO
#define SXE_ERR_MASTER_REQUESTS_PENDING SXE_ERR_PF(1)
#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT SXE_ERR_PF(2)
#define SXE_ERR_ENABLE_SRIOV_FAIL SXE_ERR_PF(3)
#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_PF(4)
#define SXE_ERR_SFP_NOT_PERSENT SXE_ERR_PF(5)
#define SXE_ERR_PHY_NOT_PERSENT SXE_ERR_PF(6)
#define SXE_ERR_PHY_RESET_FAIL SXE_ERR_PF(7)
#define SXE_ERR_FC_NOT_NEGOTIATED SXE_ERR_PF(8)
#define SXE_ERR_SFF_NOT_SUPPORTED SXE_ERR_PF(9)
#define SXEVF_ERR_MAC_ADDR_INVALID EINVAL
#define SXEVF_ERR_RESET_FAILED EIO
#define SXEVF_ERR_ARGUMENT_INVALID EINVAL
#define SXEVF_ERR_NOT_READY EBUSY
#define SXEVF_ERR_POLL_ACK_FAIL EIO
#define SXEVF_ERR_POLL_MSG_FAIL EIO
#define SXEVF_ERR_MBX_LOCK_FAIL EBUSY
#define SXEVF_ERR_REPLY_INVALID EINVAL
#define SXEVF_ERR_IRQ_NUM_INVALID EINVAL
#define SXEVF_ERR_PARAM EINVAL
#define SXEVF_ERR_MAILBOX_FAIL SXE_ERR_VF(1)
#define SXEVF_ERR_MSG_HANDLE_ERR SXE_ERR_VF(2)
#define SXEVF_ERR_DEVICE_NOT_SUPPORTED SXE_ERR_VF(3)
#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_VF(4)
#endif

View File

@ -0,0 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_H__
#define __SXEVF_H__
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include "sxe_log.h"
#include "sxevf_hw.h"
#include "sxevf_ring.h"
#include "sxevf_irq.h"
#include "sxevf_monitor.h"
#include "sxevf_ipsec.h"
#include "sxe_errno.h"
#include "sxe_compat.h"
#include "sxe_errno.h"
#define SXEVF_JUMBO_FRAME_SIZE_MAX 9728
#define SXEVF_ETH_DEAD_LOAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)
#define DEV_NAME_LEN 16
#define CHAR_BITS 8
#define SXEVF_HZ_TRANSTO_MS 1000
#define SXEVF_KFREE(addr) \
do { \
void *_addr = (addr); \
kfree(_addr); \
_addr = NULL; \
} while (0)
enum {
SXEVF_DCB_ENABLE = BIT(0),
SXEVF_RX_LEGACY_ENABLE = BIT(1),
};
enum sxevf_boards {
SXE_BOARD_VF,
SXE_BOARD_VF_HV,
};
struct sxevf_sw_stats {
u64 tx_busy;
u64 restart_queue;
u64 tx_timeout_count;
u64 hw_csum_rx_error;
u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
};
struct sxevf_stats {
struct sxevf_sw_stats sw;
struct sxevf_hw_stats hw;
};
enum sxevf_nic_state {
SXEVF_RESETTING,
SXEVF_TESTING,
SXEVF_DOWN,
SXEVF_DISABLED,
SXEVF_REMOVING,
};
struct sxevf_mac_filter_context {
u8 cur_uc_addr[ETH_ALEN];
u8 def_uc_addr[ETH_ALEN];
u8 mc_filter_type;
};
struct sxevf_adapter {
char dev_name[DEV_NAME_LEN];
struct net_device *netdev;
struct pci_dev *pdev;
u32 sw_mtu;
u16 msg_enable;
struct sxevf_ring_context rx_ring_ctxt;
struct sxevf_ring_context tx_ring_ctxt;
struct sxevf_ring_context xdp_ring_ctxt;
#ifdef SXE_IPSEC_CONFIGURE
struct sxevf_ipsec_context ipsec_ctxt;
#endif
struct sxevf_mac_filter_context mac_filter_ctxt;
struct sxevf_irq_context irq_ctxt;
struct sxevf_monitor_context monitor_ctxt;
struct sxevf_ring_feature ring_f;
u32 cap;
u32 cap2;
u8 tcs;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct sxevf_hw hw;
struct bpf_prog *xdp_prog;
/* in order to protect the data */
spinlock_t mbx_lock;
u32 mbx_version;
unsigned long state;
struct sxevf_stats stats;
struct sxevf_link_info link;
};
struct workqueue_struct *sxevf_wq_get(void);
s32 sxevf_dev_reset(struct sxevf_hw *hw);
void sxevf_start_adapter(struct sxevf_adapter *adapter);
void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter);
s32 sxevf_ring_irq_init(struct sxevf_adapter *adapter);
void sxevf_ring_irq_exit(struct sxevf_adapter *adapter);
void sxevf_save_reset_stats(struct sxevf_adapter *adapter);
void sxevf_last_counter_stats_init(struct sxevf_adapter *adapter);
#endif

View File

@ -0,0 +1,177 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_csum.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netdev_features.h>
#include "sxevf_csum.h"
#include "sxevf_ring.h"
#include "sxevf_tx_proc.h"
#include "sxe_log.h"
#ifndef HAVE_SKB_CSUM_SCTP_API
static inline bool sxevf_is_sctp_ipv4(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
if (protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_SCTP) {
LOG_DEBUG("protocal:%d tx packet type is ipv4 sctp.\n",
protocol);
ret = true;
}
return ret;
}
static inline bool sxevf_is_sctp_ipv6(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
u32 offset = skb_checksum_start_offset(skb);
u32 hdr_offset = 0;
ipv6_find_hdr(skb, &hdr_offset, IPPROTO_SCTP, NULL, NULL);
if (protocol == htons(ETH_P_IPV6) && offset == hdr_offset) {
LOG_DEBUG("protocal:%d offset:%d tx packet type is ipv6 sctp.\n",
protocol, offset);
ret = true;
}
return ret;
}
static inline bool sxevf_prot_is_sctp(__be16 protocol, struct sk_buff *skb)
{
bool ret = false;
if (sxevf_is_sctp_ipv4(protocol, skb) ||
sxevf_is_sctp_ipv6(protocol, skb)) {
ret = true;
}
return ret;
}
#else
#define sxevf_prot_is_sctp(protocol, skb) skb_csum_is_sctp(skb)
#endif
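/*
 * Compat shim: kernels that provide skb_csum_is_sctp() answer the
 * question directly; older kernels fall back to the manual IPv4/IPv6
 * header inspection above.
 */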
void sxevf_tx_csum_offload(struct sxevf_ring *tx_ring,
struct sxevf_tx_buffer *first,
struct sxevf_tx_context_desc *ctxt_desc)
{
struct sk_buff *skb = first->skb;
u16 tucmd;
u16 ip_len;
u16 mac_len;
struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev);
LOG_DEBUG_BDF("tx ring[%d] ip_summed:%d\n"
"\tcsum_offset:%d csum_start:%d protocol:%d\n"
"\tnetdev features:0x%llx\n",
tx_ring->idx, skb->ip_summed, skb->csum_offset,
skb->csum_start, skb->protocol,
tx_ring->netdev->features);
if (skb->ip_summed != CHECKSUM_PARTIAL)
goto no_checksum;
switch (skb->csum_offset) {
case SXEVF_TCP_CSUM_OFFSET:
tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_TCP;
break;
case SXEVF_UDP_CSUM_OFFSET:
tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_UDP;
break;
case SXEVF_SCTP_CSUM_OFFSET:
if (sxevf_prot_is_sctp(first->protocol, skb)) {
tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_SCTP;
break;
}
fallthrough;
default:
skb_checksum_help(skb);
goto no_checksum;
}
if (first->protocol == htons(ETH_P_IP))
tucmd |= SXEVF_TX_CTXTD_TUCMD_IPV4;
first->tx_features |= SXEVF_TX_FEATURE_CSUM;
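/*
 * For an untagged IPv4/TCP frame this works out to mac_len = 14
 * (Ethernet header) and ip_len = 20 (minimal IP header).
 */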
ip_len = skb_checksum_start_offset(skb) - skb_network_offset(skb);
mac_len = skb_network_offset(skb);
sxevf_ctxt_desc_tucmd_set(ctxt_desc, tucmd);
sxevf_ctxt_desc_iplen_set(ctxt_desc, ip_len);
sxevf_ctxt_desc_maclen_set(ctxt_desc, mac_len);
LOG_DEBUG_BDF("tx ring[%d] protocol:%d tucmd:0x%x\n"
"\tiplen:0x%x mac_len:0x%x, tx_features:0x%x\n",
tx_ring->idx, first->protocol, tucmd, ip_len, mac_len,
first->tx_features);
no_checksum:
;
}
void sxevf_rx_csum_verify(struct sxevf_ring *ring,
union sxevf_rx_data_desc *desc, struct sk_buff *skb)
{
LOG_DEBUG("rx ring[%d] csum verify ip_summed:%d\n"
"\tcsum_offset:%d csum_start:%d pkt_info:0x%x\n"
"\tnetdev feature:0x%llx\n",
ring->idx, skb->ip_summed, skb->csum_offset, skb->csum_start,
desc->wb.lower.lo_dword.hs_rss.pkt_info,
ring->netdev->features);
skb_checksum_none_assert(skb);
if (!(ring->netdev->features & NETIF_F_RXCSUM)) {
LOG_WARN("rx ring[%d] checksum verify no offload\n"
"\tip_summed:%d csum_offset:%d csum_start:%d protocol:0x%x\n",
ring->idx, skb->ip_summed, skb->csum_offset,
skb->csum_start, skb->protocol);
goto l_out;
}
if (sxevf_status_err_check(desc, SXEVF_RXD_STAT_IPCS) &&
sxevf_status_err_check(desc, SXEVF_RXDADV_ERR_IPE)) {
ring->rx_stats.csum_err++;
LOG_ERROR("rx ring [%d] ip checksum fail.csum_err:%llu\n",
ring->idx, ring->rx_stats.csum_err);
goto l_out;
}
if (sxevf_status_err_check(desc, SXEVF_RXD_STAT_LB)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
goto l_out;
}
if (!sxevf_status_err_check(desc, SXEVF_RXD_STAT_L4CS)) {
LOG_DEBUG("rx ring[%d] no need verify L4 checksum\n",
ring->idx);
goto l_out;
}
if (sxevf_status_err_check(desc, SXEVF_RXDADV_ERR_L4E)) {
ring->rx_stats.csum_err++;
LOG_ERROR("rx ring[%d] L4 checksum verify error.\n", ring->idx);
goto l_out;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
l_out:
;
}

View File

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_csum.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_CSUM_H__
#define __SXEVF_CSUM_H__
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "sxevf.h"
#include "sxevf_ring.h"
#ifdef NOT_INCLUDE_SCTP_H
struct sctphdr {
__be16 source;
__be16 dest;
__be32 vtag;
__le32 checksum;
} __packed;
#else
#include <linux/sctp.h>
#endif
#define SXEVF_TCP_CSUM_OFFSET (offsetof(struct tcphdr, check))
#define SXEVF_UDP_CSUM_OFFSET (offsetof(struct udphdr, check))
#define SXEVF_SCTP_CSUM_OFFSET (offsetof(struct sctphdr, checksum))
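/*
 * These resolve to the checksum field offsets within the transport
 * headers: 16 for TCP, 6 for UDP and 8 for SCTP. The tx path switches
 * on skb->csum_offset against these values to pick the L4 type.
 */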
void sxevf_tx_csum_offload(struct sxevf_ring *tx_ring,
struct sxevf_tx_buffer *first,
struct sxevf_tx_context_desc *ctxt_desc);
void sxevf_rx_csum_verify(struct sxevf_ring *ring,
union sxevf_rx_data_desc *desc, struct sk_buff *skb);
#endif

View File

@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_debug.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/highmem.h>
#include "sxevf_debug.h"
#define SKB_DESCRIPTION_LEN 256
void sxevf_dump_skb(struct sk_buff *skb)
{
#ifndef SXE_DRIVER_RELEASE
u32 len = skb->len;
u32 data_len = skb->data_len;
#endif
s8 desc[SKB_DESCRIPTION_LEN] = {};
snprintf(desc, SKB_DESCRIPTION_LEN, "skb addr:%p %s", skb,
"linear region");
#ifndef SXE_DRIVER_RELEASE
sxe_log_binary(__FILE__, __func__, __LINE__, (u8 *)skb->data,
(u64)skb, min_t(u32, len - data_len, 256), desc);
#endif
}

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_debug.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXE_DEBUG_H__
#define __SXE_DEBUG_H__
#include <linux/skbuff.h>
#include "sxe_log.h"
void sxevf_dump_skb(struct sk_buff *skb);
#if defined SXE_DRIVER_RELEASE
#define SKB_DUMP(skb)
#else
#define SKB_DUMP(skb) sxevf_dump_skb(skb)
#endif
#endif

View File

@ -0,0 +1,726 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_ethtool.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include "sxe_version.h"
#include "sxevf_ethtool.h"
#include "sxevf_hw.h"
#include "sxe_log.h"
#include "sxevf_tx_proc.h"
#include "sxevf_rx_proc.h"
#include "sxevf_netdev.h"
#include "sxevf_msg.h"
#include "sxevf_irq.h"
#include "sxevf_ring.h"
#define SXEVF_DIAG_REGS_TEST 0
#define SXEVF_DIAG_LINK_TEST 1
#define SXEVF_TEST_SLEEP_TIME 4
#define SXEVF_ETHTOOL_DUMP_REGS_NUM (sxevf_reg_dump_num_get())
#define SXEVF_ETHTOOL_DUMP_REGS_LEN (SXEVF_ETHTOOL_DUMP_REGS_NUM * sizeof(u32))
static const char sxevf_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Link test (on/offline)"
};
static const struct sxevf_ethtool_stats sxevf_gstrings_stats[] = {
{ "rx_packets", NETDEV_STATS,
sizeof(((struct net_device_stats *)0)->rx_packets),
offsetof(struct net_device_stats, rx_packets) },
{ "tx_packets", NETDEV_STATS,
sizeof(((struct net_device_stats *)0)->tx_packets),
offsetof(struct net_device_stats, tx_packets) },
{ "rx_bytes", NETDEV_STATS,
sizeof(((struct net_device_stats *)0)->rx_bytes),
offsetof(struct net_device_stats, rx_bytes) },
{ "tx_bytes", NETDEV_STATS,
sizeof(((struct net_device_stats *)0)->tx_bytes),
offsetof(struct net_device_stats, tx_bytes) },
{ "multicast", NETDEV_STATS,
sizeof(((struct net_device_stats *)0)->multicast),
offsetof(struct net_device_stats, multicast) },
{ "tx_busy", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.tx_busy),
offsetof(struct sxevf_adapter, stats.sw.tx_busy)},
{ "tx_restart_queue", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.restart_queue),
offsetof(struct sxevf_adapter, stats.sw.restart_queue)},
{ "tx_timeout_count", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.tx_timeout_count),
offsetof(struct sxevf_adapter, stats.sw.tx_timeout_count)},
{ "rx_csum_offload_errors", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.hw_csum_rx_error),
offsetof(struct sxevf_adapter, stats.sw.hw_csum_rx_error)},
{ "alloc_rx_page", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.alloc_rx_page),
offsetof(struct sxevf_adapter, stats.sw.alloc_rx_page)},
{ "alloc_rx_page_failed", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.alloc_rx_page_failed),
offsetof(struct sxevf_adapter, stats.sw.alloc_rx_page_failed)},
{ "alloc_rx_buff_failed", SXEVF_STATS,
sizeof(((struct sxevf_adapter *)0)->stats.sw.alloc_rx_buff_failed),
offsetof(struct sxevf_adapter, stats.sw.alloc_rx_buff_failed)},
};
static const char sxevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC
"legacy-rx",
#endif
};
u32 sxevf_self_test_suite_num_get(void)
{
return sizeof(sxevf_gstrings_test) / ETH_GSTRING_LEN;
}
u32 sxevf_stats_num_get(void)
{
return ARRAY_SIZE(sxevf_gstrings_stats);
}
u32 sxevf_priv_flags_num_get(void)
{
return ARRAY_SIZE(sxevf_priv_flags_strings);
}
static void sxevf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, SXEVF_DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, SXE_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = SXEVF_PRIV_FLAGS_STR_LEN;
}
#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
static void sxevf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam __always_unused *kernel_ring,
struct netlink_ext_ack __always_unused *extack)
#else
static void sxevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
#endif
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
ring->rx_max_pending = SXEVF_DESC_CNT_MAX;
ring->tx_max_pending = SXEVF_DESC_CNT_MAX;
ring->rx_pending = adapter->rx_ring_ctxt.ring[0]->depth;
ring->tx_pending = adapter->tx_ring_ctxt.ring[0]->depth;
}
static inline bool sxevf_ringparam_changed(struct sxevf_adapter *adapter,
struct ethtool_ringparam *ring,
u32 *tx_cnt, u32 *rx_cnt)
{
bool changed = true;
*tx_cnt = clamp_t(u32, ring->tx_pending, SXEVF_DESC_CNT_MIN,
SXEVF_DESC_CNT_MAX);
*tx_cnt = ALIGN(*tx_cnt, SXEVF_REQ_DESCRIPTOR_MULTIPLE);
*rx_cnt = clamp_t(u32, ring->rx_pending, SXEVF_DESC_CNT_MIN,
SXEVF_DESC_CNT_MAX);
*rx_cnt = ALIGN(*rx_cnt, SXEVF_REQ_DESCRIPTOR_MULTIPLE);
if ((*tx_cnt == adapter->tx_ring_ctxt.depth) &&
(*rx_cnt == adapter->rx_ring_ctxt.depth))
changed = false;
return changed;
}
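/*
 * Example: a request of 500 tx descriptors is clamped into the
 * supported range and then rounded up by ALIGN() to the next multiple
 * of SXEVF_REQ_DESCRIPTOR_MULTIPLE before being compared with the
 * current depth.
 */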
static inline void sxevf_ring_depth_set(struct sxevf_adapter *adapter,
u32 tx_cnt, u32 rx_cnt)
{
u32 i;
struct sxevf_ring **tx_ring = adapter->tx_ring_ctxt.ring;
struct sxevf_ring **rx_ring = adapter->rx_ring_ctxt.ring;
struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring;
for (i = 0; i < adapter->tx_ring_ctxt.num; i++)
tx_ring[i]->depth = tx_cnt;
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++)
xdp_ring[i]->depth = tx_cnt;
for (i = 0; i < adapter->rx_ring_ctxt.num; i++)
rx_ring[i]->depth = rx_cnt;
adapter->tx_ring_ctxt.depth = tx_cnt;
adapter->xdp_ring_ctxt.depth = tx_cnt;
adapter->rx_ring_ctxt.depth = rx_cnt;
}
#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
static int sxevf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *user_param,
struct kernel_ethtool_ringparam __always_unused *kernel_ring,
struct netlink_ext_ack __always_unused *extack)
#else
static int sxevf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *user_param)
#endif
{
int ret = 0;
u32 new_rx_count, new_tx_count;
struct sxevf_adapter *adapter = netdev_priv(netdev);
if (user_param->rx_mini_pending || user_param->rx_jumbo_pending) {
LOG_ERROR_BDF("dont support set rx_mini_pending=%u or rx_jumbo_pending=%u\n",
user_param->rx_mini_pending,
user_param->rx_jumbo_pending);
ret = -EINVAL;
goto l_end;
}
if (!sxevf_ringparam_changed(adapter, user_param, &new_tx_count,
&new_rx_count)) {
LOG_DEBUG_BDF("ring depth dont change, tx_depth=%u, rx_depth=%u\n",
new_tx_count, new_rx_count);
goto l_end;
}
while (test_and_set_bit(SXEVF_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
sxevf_ring_depth_set(adapter, new_tx_count, new_rx_count);
goto l_clear;
}
sxevf_down(adapter);
if (new_tx_count != adapter->tx_ring_ctxt.depth) {
ret = sxevf_tx_ring_depth_reset(adapter, new_tx_count);
if (ret < 0)
goto l_up;
}
if (new_rx_count != adapter->rx_ring_ctxt.depth)
ret = sxevf_rx_ring_depth_reset(adapter, new_rx_count);
l_up:
sxevf_up(adapter);
l_clear:
clear_bit(SXEVF_RESETTING, &adapter->state);
l_end:
return ret;
}
static void sxevf_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
ch->other_count = SXEVF_NON_QUEUE_IRQ_NUM;
ch->max_other = SXEVF_NON_QUEUE_IRQ_NUM;
if (adapter->cap & SXEVF_DCB_ENABLE) {
ch->combined_count = SXEVF_TXRX_RING_NUM_DEFAULT;
ch->max_combined = SXEVF_TXRX_RING_NUM_DEFAULT;
} else {
ch->combined_count = SXEVF_RSS_RING_NUM_MAX;
ch->max_combined = SXEVF_RSS_RING_NUM_MAX;
}
}
static void sxevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
u32 i;
char *p = (char *)data;
struct sxevf_adapter *adapter = netdev_priv(netdev);
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *sxevf_gstrings_test,
SXEVF_TEST_GSTRING_ARRAY_SIZE * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (i = 0; i < SXEVF_STATS_ARRAY_SIZE; i++) {
memcpy(p, sxevf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->tx_ring_ctxt.num; i++) {
sprintf(p, "tx_ring_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_ring_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) {
sprintf(p, "xdp_ring_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "xdp_ring_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->rx_ring_ctxt.num; i++) {
sprintf(p, "rx_ring_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_ring_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, sxevf_priv_flags_strings,
SXEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
default:
break;
}
}
static int sxevf_get_sset_count(struct net_device *netdev, int sset)
{
int ret;
switch (sset) {
case ETH_SS_TEST:
ret = SXEVF_TEST_GSTRING_ARRAY_SIZE;
break;
case ETH_SS_STATS:
ret = SXEVF_STATS_LEN;
break;
case ETH_SS_PRIV_FLAGS:
ret = SXEVF_PRIV_FLAGS_STR_LEN;
break;
default:
ret = -EINVAL;
}
return ret;
}
static void sxevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
s8 *p;
u32 i, j, start;
struct sxevf_ring *ring;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
struct sxevf_adapter *adapter = netdev_priv(netdev);
sxevf_update_stats(adapter);
net_stats = dev_get_stats(netdev, &temp);
for (i = 0; i < SXEVF_STATS_ARRAY_SIZE; i++) {
switch (sxevf_gstrings_stats[i].type) {
case NETDEV_STATS:
p = (char *)net_stats +
sxevf_gstrings_stats[i].stat_offset;
break;
case SXEVF_STATS:
p = (char *)adapter +
sxevf_gstrings_stats[i].stat_offset;
break;
default:
data[i] = 0;
continue;
}
data[i] = (sxevf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
*(u64 *)p :
*(u32 *)p;
}
for (j = 0; j < adapter->tx_ring_ctxt.num; j++) {
ring = adapter->tx_ring_ctxt.ring[j];
if (!ring) {
data[i++] = 0;
data[i++] = 0;
continue;
}
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
}
for (j = 0; j < adapter->xdp_ring_ctxt.num; j++) {
ring = adapter->xdp_ring_ctxt.ring[j];
if (!ring) {
data[i++] = 0;
data[i++] = 0;
continue;
}
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
}
for (j = 0; j < adapter->rx_ring_ctxt.num; j++) {
ring = adapter->rx_ring_ctxt.ring[j];
if (!ring) {
data[i++] = 0;
data[i++] = 0;
continue;
}
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
}
}
static int sxevf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int ret = -EOPNOTSUPP;
struct sxevf_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = adapter->rx_ring_ctxt.num;
ret = 0;
break;
default:
LOG_DEBUG_BDF("command parameters not supported\n, cmd=%u",
cmd->cmd);
break;
}
return ret;
}
static u32 sxevf_get_priv_flags(struct net_device *netdev)
{
u32 priv_flags = 0;
#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC
struct sxevf_adapter *adapter = netdev_priv(netdev);
if (adapter->cap & SXEVF_RX_LEGACY_ENABLE)
priv_flags |= SXEVF_PRIV_FLAGS_LEGACY_RX;
#endif
return priv_flags;
}
static int sxevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
u32 cap = adapter->cap;
#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC
cap &= ~SXEVF_RX_LEGACY_ENABLE;
if (priv_flags & SXEVF_PRIV_FLAGS_LEGACY_RX)
cap |= SXEVF_RX_LEGACY_ENABLE;
#endif
if (cap != adapter->cap) {
adapter->cap = cap;
if (netif_running(netdev))
sxevf_hw_reinit(adapter);
}
LOG_DEBUG_BDF("priv_flags=%u\n", priv_flags);
return 0;
}
static int sxevf_nway_reset(struct net_device *netdev)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
sxevf_hw_reinit(adapter);
LOG_DEBUG_BDF("ethtool reset\n");
}
return 0;
}
static int sxevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
u32 supported = 0;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = -1;
if (adapter->link.is_up) {
switch (adapter->link.speed) {
case SXEVF_LINK_SPEED_10GB_FULL:
supported |= SUPPORTED_10000baseKR_Full;
cmd->base.speed = SPEED_10000;
break;
case SXEVF_LINK_SPEED_1GB_FULL:
supported |= SUPPORTED_1000baseKX_Full;
cmd->base.speed = SPEED_1000;
break;
default:
supported |= SUPPORTED_10000baseKR_Full;
cmd->base.speed = SPEED_10000;
break;
}
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
cmd->base.duplex = DUPLEX_FULL;
} else {
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static u32 sxevf_get_rss_redir_tbl_size(struct net_device *netdev)
{
return SXEVF_MAX_RETA_ENTRIES;
}
static u32 sxevf_get_rss_hash_key_size(struct net_device *netdev)
{
return SXEVF_RSS_HASH_KEY_SIZE;
}
static int sxevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
int err = 0;
struct sxevf_adapter *adapter = netdev_priv(netdev);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir && !key) {
LOG_DEBUG_BDF("param err, indir=%p, key=%p\n", indir, key);
return 0;
}
spin_lock_bh(&adapter->mbx_lock);
if (indir)
err = sxevf_redir_tbl_get(&adapter->hw, adapter->rx_ring_ctxt.num, indir);
if (!err && key)
err = sxevf_rss_hash_key_get(&adapter->hw, key);
spin_unlock_bh(&adapter->mbx_lock);
return err;
}
static int sxevf_get_regs_len(struct net_device *netdev)
{
return SXEVF_ETHTOOL_DUMP_REGS_LEN;
}
static void sxevf_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *data)
{
u8 dump_regs_num;
struct sxevf_adapter *adapter = netdev_priv(netdev);
struct sxevf_hw *hw = &adapter->hw;
memset(data, 0, SXEVF_ETHTOOL_DUMP_REGS_LEN);
regs->version = 0;
dump_regs_num =
hw->setup.ops->regs_dump(hw, data, SXEVF_ETHTOOL_DUMP_REGS_LEN);
if (dump_regs_num != SXEVF_ETHTOOL_DUMP_REGS_NUM) {
LOG_WARN_BDF("dump_regs_num=%u, regs_num_max=%u\n",
dump_regs_num, SXEVF_ETHTOOL_DUMP_REGS_NUM);
}
}
static u32 sxevf_get_msglevel(struct net_device *netdev)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void sxevf_set_msglevel(struct net_device *netdev, u32 data)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
static s32 sxevf_link_test(struct sxevf_adapter *adapter)
{
s32 ret;
u32 link_up;
struct sxevf_hw *hw = &adapter->hw;
link_up = hw->setup.ops->link_state_get(hw);
if (!(link_up & SXE_VFLINKS_UP))
ret = -SXEVF_DIAG_TEST_BLOCKED;
else
ret = SXEVF_DIAG_TEST_PASSED;
return ret;
}
static int sxevf_reg_test(struct sxevf_adapter *adapter)
{
s32 ret;
struct sxevf_hw *hw = &adapter->hw;
if (sxevf_is_hw_fault(hw)) {
LOG_DEV_ERR("nic hw fault - register test blocked\n");
ret = -SXEVF_DIAG_TEST_BLOCKED;
goto l_end;
}
ret = hw->setup.ops->regs_test(hw);
if (ret) {
LOG_ERROR_BDF("register test failed\n");
goto l_end;
}
l_end:
return ret;
}
static void sxevf_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *result)
{
s32 ret;
struct sxevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
struct sxevf_hw *hw = &adapter->hw;
if (sxevf_is_hw_fault(hw)) {
result[SXEVF_DIAG_REGS_TEST] = SXEVF_DIAG_TEST_BLOCKED;
result[SXEVF_DIAG_LINK_TEST] = SXEVF_DIAG_TEST_BLOCKED;
eth_test->flags |= ETH_TEST_FL_FAILED;
LOG_DEV_ERR("nic hw fault - test blocked\n");
return;
}
set_bit(SXEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
LOG_DEV_DEBUG("offline testing starting\n");
ret = sxevf_link_test(adapter);
if (ret)
eth_test->flags |= ETH_TEST_FL_FAILED;
result[SXEVF_DIAG_LINK_TEST] = -ret;
if (if_running)
sxevf_close(netdev);
else
sxevf_reset(adapter);
LOG_DEV_DEBUG("register testing starting\n");
ret = sxevf_reg_test(adapter);
if (ret)
eth_test->flags |= ETH_TEST_FL_FAILED;
result[SXEVF_DIAG_REGS_TEST] = -ret;
sxevf_reset(adapter);
clear_bit(SXEVF_TESTING, &adapter->state);
if (if_running)
sxevf_open(netdev);
} else {
LOG_DEV_DEBUG("online testing starting\n");
ret = sxevf_link_test(adapter);
if (ret)
eth_test->flags |= ETH_TEST_FL_FAILED;
result[SXEVF_DIAG_LINK_TEST] = -ret;
result[SXEVF_DIAG_REGS_TEST] = SXEVF_DIAG_TEST_PASSED;
clear_bit(SXEVF_TESTING, &adapter->state);
}
msleep_interruptible(SXEVF_TEST_SLEEP_TIME * SXEVF_HZ_TRANSTO_MS);
}
static int sxevf_get_coalesce(struct net_device *netdev,
#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
#else
struct ethtool_coalesce *ec)
#endif
{
return sxevf_irq_coalesce_get(netdev, ec);
}
static int sxevf_set_coalesce(struct net_device *netdev,
#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
#else
struct ethtool_coalesce *ec)
#endif
{
return sxevf_irq_coalesce_set(netdev, ec);
}
static const struct ethtool_ops sxevf_ethtool_ops = {
#ifdef ETHTOOL_COALESCE_USECS
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
#endif
.get_drvinfo = sxevf_get_drvinfo,
.nway_reset = sxevf_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = sxevf_get_ringparam,
.set_ringparam = sxevf_set_ringparam,
.get_channels = sxevf_get_channels,
.get_strings = sxevf_get_strings,
.get_sset_count = sxevf_get_sset_count,
.get_ethtool_stats = sxevf_get_ethtool_stats,
.get_rxnfc = sxevf_get_rxnfc,
.get_rxfh_indir_size = sxevf_get_rss_redir_tbl_size,
.get_rxfh_key_size = sxevf_get_rss_hash_key_size,
.get_rxfh = sxevf_get_rxfh,
.get_link_ksettings = sxevf_get_link_ksettings,
.get_priv_flags = sxevf_get_priv_flags,
.set_priv_flags = sxevf_set_priv_flags,
.get_regs_len = sxevf_get_regs_len,
.get_regs = sxevf_get_regs,
.get_msglevel = sxevf_get_msglevel,
.set_msglevel = sxevf_set_msglevel,
.self_test = sxevf_diag_test,
.set_coalesce = sxevf_set_coalesce,
.get_coalesce = sxevf_get_coalesce,
};
void sxevf_ethtool_ops_set(struct net_device *netdev)
{
netdev->ethtool_ops = &sxevf_ethtool_ops;
}

View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_ethtool.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_ETHTOOL_H__
#define __SXEVF_ETHTOOL_H__
#include <linux/ethtool.h>
#include "sxevf.h"
#define SXEVF_TEST_GSTRING_ARRAY_SIZE sxevf_self_test_suite_num_get()
#define SXEVF_RING_STATS_LEN \
((((struct sxevf_adapter *)netdev_priv(netdev))->tx_ring_ctxt.num + \
((struct sxevf_adapter *)netdev_priv(netdev))->xdp_ring_ctxt.num + \
((struct sxevf_adapter *)netdev_priv(netdev))->rx_ring_ctxt.num) * \
(sizeof(struct sxevf_ring_stats) / sizeof(u64)))
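/*
 * SXEVF_RING_STATS_LEN expands netdev_priv(netdev) directly, so it can
 * only be used where a local "netdev" net_device pointer is in scope;
 * each ring contributes one packets and one bytes counter.
 */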
#define SXEVF_STATS_ARRAY_SIZE sxevf_stats_num_get()
#define SXEVF_STATS_LEN (SXEVF_STATS_ARRAY_SIZE + SXEVF_RING_STATS_LEN)
#define SXEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
#define SXEVF_PRIV_FLAGS_STR_LEN sxevf_priv_flags_num_get()
enum { NETDEV_STATS, SXEVF_STATS };
struct sxevf_ethtool_stats {
char stat_string[ETH_GSTRING_LEN];
int type;
int sizeof_stat;
int stat_offset;
};
u32 sxevf_self_test_suite_num_get(void);
u32 sxevf_stats_num_get(void);
u32 sxevf_priv_flags_num_get(void);
void sxevf_ethtool_ops_set(struct net_device *netdev);
#endif

View File

@ -0,0 +1,973 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_hw.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
#include <linux/etherdevice.h>
#include "sxevf_hw.h"
#include "sxevf_regs.h"
#include "sxe_log.h"
#include "sxevf_irq.h"
#include "sxevf_msg.h"
#include "sxevf_ring.h"
#include "sxevf.h"
#include "sxevf_rx_proc.h"
#else
#include "sxe_errno.h"
#include "sxe_logs.h"
#include "sxe_dpdk_version.h"
#include "sxe_compat_version.h"
#include "sxevf.h"
#include "sxevf_hw.h"
#endif
#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
struct sxevf_adapter;
#endif
#define SXEVF_REG_READ_CNT 5
#define SXEVF_REG_READ_FAIL 0xffffffffU
#define SXEVF_RING_WAIT_LOOP (100)
#define SXEVF_MAX_RX_DESC_POLL (10)
#define SXEVF_REG_READ(hw, addr) sxevf_reg_read(hw, addr)
#define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value)
#define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS)
#ifndef SXE_DPDK
void sxevf_hw_fault_handle(struct sxevf_hw *hw)
{
struct sxevf_adapter *adapter = hw->adapter;
if (test_bit(SXEVF_HW_FAULT, &hw->state))
goto l_ret;
set_bit(SXEVF_HW_FAULT, &hw->state);
LOG_DEV_ERR("sxe nic hw fault\n");
if (hw->fault_handle && hw->priv)
hw->fault_handle(hw->priv);
l_ret:
;
}
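/*
 * Once SXEVF_HW_FAULT is latched here, sxevf_reg_read() below answers
 * every further read with SXEVF_REG_READ_FAIL and sxevf_reg_write()
 * becomes a no-op, so the fault only needs to be reported once.
 */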
static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
{
u32 value;
u8 __iomem *base_addr = hw->reg_base_addr;
struct sxevf_adapter *adapter = hw->adapter;
u8 i;
if (reg == SXE_VFSTATUS) {
sxevf_hw_fault_handle(hw);
return;
}
for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
value = hw->reg_read(base_addr + SXE_VFSTATUS);
if (value != SXEVF_REG_READ_FAIL)
break;
mdelay(20);
}
LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value);
if (value == SXEVF_REG_READ_FAIL)
sxevf_hw_fault_handle(hw);
}
static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
{
u32 value;
u8 __iomem *base_addr = hw->reg_base_addr;
struct sxevf_adapter *adapter = hw->adapter;
if (sxevf_is_hw_fault(hw)) {
value = SXEVF_REG_READ_FAIL;
goto l_ret;
}
value = hw->reg_read(base_addr + reg);
if (unlikely(value == SXEVF_REG_READ_FAIL)) {
LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value);
sxevf_hw_fault_check(hw, reg);
}
l_ret:
return value;
}
static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
{
u8 __iomem *base_addr = hw->reg_base_addr;
if (sxevf_is_hw_fault(hw))
goto l_ret;
hw->reg_write(value, base_addr + reg);
l_ret:
;
}
#else
static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
{
u32 i, value;
u8 __iomem *base_addr = hw->reg_base_addr;
value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
if (unlikely(value == SXEVF_REG_READ_FAIL)) {
for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
LOG_ERROR("reg[0x%x] read failed, value=%#x\n", reg,
value);
value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
if (value != SXEVF_REG_READ_FAIL) {
LOG_INFO("reg[0x%x] read ok, value=%#x\n", reg,
value);
break;
}
mdelay(3);
}
}
return value;
}
static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
{
u8 __iomem *base_addr = hw->reg_base_addr;
rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
}
#endif
void sxevf_hw_stop(struct sxevf_hw *hw)
{
u8 i;
u32 value;
for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
value = SXEVF_REG_READ(hw, SXE_VFRXDCTL(i));
if (value & SXE_VFRXDCTL_ENABLE) {
value &= ~SXE_VFRXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), value);
}
}
SXEVF_WRITE_FLUSH(hw);
SXEVF_REG_WRITE(hw, SXE_VFEIMC, SXEVF_VFEIMC_IRQ_MASK);
SXEVF_REG_READ(hw, SXE_VFEICR);
for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
value = SXEVF_REG_READ(hw, SXE_VFTXDCTL(i));
if (value & SXE_VFTXDCTL_ENABLE) {
value &= ~SXE_VFTXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), value);
}
}
}
void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
{
struct sxevf_adapter *adapter = hw->adapter;
SXEVF_REG_WRITE(hw, SXE_VFMBMEM + (index << 2), msg);
LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg);
}
u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index)
{
u32 value = SXEVF_REG_READ(hw, SXE_VFMBMEM + (index << 2));
struct sxevf_adapter *adapter = hw->adapter;
LOG_DEBUG_BDF("index:%u read mbx mem:0x%x.\n", index, value);
return value;
}
u32 sxevf_mailbox_read(struct sxevf_hw *hw)
{
return SXEVF_REG_READ(hw, SXE_VFMAILBOX);
}
void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value);
}
void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw)
{
SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ);
}
void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw)
{
SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK);
}
void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
{
u8 allocation;
u32 ivar;
allocation = vector | SXEVF_IVAR_ALLOC_VALID;
ivar = SXEVF_REG_READ(hw, SXE_VFIVAR_MISC);
ivar &= ~0xFF;
ivar |= allocation;
SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar);
}
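/*
 * The misc (mailbox/link) event vector lives in the low byte of
 * VFIVAR_MISC; SXEVF_IVAR_ALLOC_VALID tags the entry as active.
 */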
void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFEIMS, value);
}
void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
{
SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask);
SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask);
}
void sxevf_irq_disable(struct sxevf_hw *hw)
{
SXEVF_REG_WRITE(hw, SXE_VFEIAM, 0);
SXEVF_REG_WRITE(hw, SXE_VFEIMC, ~0);
SXEVF_WRITE_FLUSH(hw);
}
void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx,
u16 vector)
{
u8 allocation;
u32 ivar, position;
allocation = vector | SXEVF_IVAR_ALLOC_VALID;
position = ((hw_ring_idx & 1) * 16) + (8 * is_tx);
ivar = SXEVF_REG_READ(hw, SXE_VFIVAR(hw_ring_idx >> 1));
ivar &= ~(0xFF << position);
ivar |= (allocation << position);
SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar);
}
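/*
 * Each 32-bit VFIVAR register packs four 8-bit entries covering the
 * rx and tx vectors of an even/odd ring pair. Example: hw_ring_idx = 3
 * with is_tx = true selects VFIVAR(1) and bit position
 * ((3 & 1) * 16) + 8 = 24, i.e. the register's top byte.
 */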
void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
{
u32 eitr = interval & SXEVF_EITR_ITR_MASK;
eitr |= SXEVF_EITR_CNT_WDIS;
SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr);
}
static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx,
u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value);
}
static void sxevf_pending_irq_clear(struct sxevf_hw *hw)
{
SXEVF_REG_READ(hw, SXE_VFEICR);
}
static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics)
{
SXEVF_REG_WRITE(hw, SXE_VFEICS, eics);
}
static const struct sxevf_irq_operations sxevf_irq_ops = {
.ring_irq_interval_set = sxevf_ring_irq_interval_set,
.event_irq_interval_set = sxevf_event_irq_interval_set,
.ring_irq_map = sxevf_hw_ring_irq_map,
.event_irq_map = sxevf_event_irq_map,
.pending_irq_clear = sxevf_pending_irq_clear,
.ring_irq_trigger = sxevf_ring_irq_trigger,
.specific_irq_enable = sxevf_specific_irq_enable,
.irq_enable = sxevf_irq_enable,
.irq_disable = sxevf_irq_disable,
};
void sxevf_hw_reset(struct sxevf_hw *hw)
{
SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST);
SXEVF_WRITE_FLUSH(hw);
}
static bool sxevf_hw_rst_done(struct sxevf_hw *hw)
{
return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST);
}
u32 sxevf_link_state_get(struct sxevf_hw *hw)
{
return SXEVF_REG_READ(hw, SXE_VFLINKS);
}
u32 dump_regs[] = {
SXE_VFCTRL,
};
u16 sxevf_reg_dump_num_get(void)
{
return ARRAY_SIZE(dump_regs);
}
static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size)
{
u32 i;
u32 regs_num = buf_size / sizeof(u32);
for (i = 0; i < regs_num; i++)
regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]);
return i;
}
#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define WRITE_NO_TEST 3
#define TABLE32_TEST 4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
struct sxevf_self_test_reg {
u32 reg;
u8 array_len;
u8 test_type;
u32 mask;
u32 write;
};
static const struct sxevf_self_test_reg self_test_reg[] = {
{ SXE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ SXE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ SXE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, SXEVF_RXDCTL_ENABLE },
{ SXE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
{ SXE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ SXE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ SXE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
{ .reg = 0 }
};
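/*
 * Self-test table layout: each entry names a register, how many
 * instances to exercise (array_len, stepped by 0x40 for ring registers
 * in sxevf_regs_test()), the test type, the mask of bits to compare
 * and the value written. An entry with .reg = 0 terminates the table.
 */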
static s32 sxevf_reg_pattern_test(struct sxevf_hw *hw, u32 reg, u32 mask,
u32 write)
{
s32 ret = 0;
u32 pat, val, before;
static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000,
0xFFFFFFFE };
struct sxevf_adapter *adapter = hw->adapter;
if (sxevf_is_hw_fault(hw)) {
LOG_ERROR_BDF("hw fault\n");
ret = -SXEVF_DIAG_TEST_BLOCKED;
goto l_end;
}
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
before = SXEVF_REG_READ(hw, reg);
SXEVF_REG_WRITE(hw, reg, test_pattern[pat] & write);
val = SXEVF_REG_READ(hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
LOG_MSG_ERR(drv,
"pattern test reg %04X failed:\n"
"\tgot 0x%08X expected 0x%08X\n",
reg, val,
(test_pattern[pat] & write & mask));
SXEVF_REG_WRITE(hw, reg, before);
ret = -SXEVF_DIAG_REG_PATTERN_TEST_ERR;
goto l_end;
}
SXEVF_REG_WRITE(hw, reg, before);
}
l_end:
return ret;
}
static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg, u32 mask,
u32 write)
{
s32 ret = 0;
u32 val, before;
struct sxevf_adapter *adapter = hw->adapter;
if (sxevf_is_hw_fault(hw)) {
LOG_ERROR_BDF("hw fault\n");
ret = -SXEVF_DIAG_TEST_BLOCKED;
goto l_end;
}
before = SXEVF_REG_READ(hw, reg);
SXEVF_REG_WRITE(hw, reg, write & mask);
val = SXEVF_REG_READ(hw, reg);
if ((write & mask) != (val & mask)) {
LOG_DEV_ERR("set/check reg %04X test failed:\n"
"\tgot 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
SXEVF_REG_WRITE(hw, reg, before);
ret = -SXEVF_DIAG_CHECK_REG_TEST_ERR;
goto l_end;
}
SXEVF_REG_WRITE(hw, reg, before);
l_end:
return ret;
}
static s32 sxevf_regs_test(struct sxevf_hw *hw)
{
u32 i;
s32 ret = 0;
const struct sxevf_self_test_reg *test = self_test_reg;
struct sxevf_adapter *adapter = hw->adapter;
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
ret = sxevf_reg_pattern_test(hw,
test->reg + (i * 0x40),
test->mask, test->write);
break;
case TABLE32_TEST:
ret = sxevf_reg_pattern_test(hw,
test->reg + (i * 4),
test->mask, test->write);
break;
case TABLE64_TEST_LO:
ret = sxevf_reg_pattern_test(hw,
test->reg + (i * 8),
test->mask, test->write);
break;
case TABLE64_TEST_HI:
ret = sxevf_reg_pattern_test(hw,
(test->reg + 4) + (i * 8),
test->mask, test->write);
break;
case SET_READ_TEST:
ret = sxevf_reg_set_and_check(hw,
test->reg + (i * 0x40),
test->mask, test->write);
break;
case WRITE_NO_TEST:
SXEVF_REG_WRITE(hw, test->reg + (i * 0x40),
test->write);
break;
default:
LOG_ERROR_BDF("reg test mod err, type=%d\n",
test->test_type);
break;
}
if (ret)
goto l_end;
}
test++;
}
l_end:
return ret;
}
static const struct sxevf_setup_operations sxevf_setup_ops = {
.reset = sxevf_hw_reset,
.hw_stop = sxevf_hw_stop,
.regs_test = sxevf_regs_test,
.regs_dump = sxevf_reg_dump,
.link_state_get = sxevf_link_state_get,
.reset_done = sxevf_hw_rst_done,
};
static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx)
{
SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx),
(desc_dma_addr & DMA_BIT_MASK(32)));
SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
}
static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
{
SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0);
SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0);
}
static void sxevf_tx_desc_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
u32 wb_thresh, u32 host_thresh,
u32 prefech_thresh)
{
u32 txdctl = 0;
txdctl |= (wb_thresh << SXEVF_TXDCTL_WTHRESH_SHIFT);
txdctl |= (host_thresh << SXEVF_TXDCTL_HTHRESH_SHIFT) | prefech_thresh;
SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
}
void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
{
u32 wait_loop = SXEVF_MAX_TXRX_DESC_POLL;
struct sxevf_adapter *adapter = hw->adapter;
u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
if (is_on) {
txdctl |= SXEVF_TXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
do {
usleep_range(1000, 2000);
txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & SXEVF_TXDCTL_ENABLE));
} else {
txdctl &= ~SXEVF_TXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
do {
usleep_range(1000, 2000);
txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
} while (--wait_loop && (txdctl & SXEVF_TXDCTL_ENABLE));
}
if (!wait_loop)
LOG_DEV_ERR("tx ring %u switch %u failed within\n"
"\tthe polling period\n", reg_idx, is_on);
}
static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
{
u32 rxdctl;
u32 wait_loop = SXEVF_RX_RING_POLL_MAX;
struct sxevf_adapter *adapter = hw->adapter;
if (!hw->reg_base_addr)
goto l_end;
rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
rxdctl &= ~SXE_VFRXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
do {
SXEVF_UDELAY(10);
rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
} while (--wait_loop && (rxdctl & SXE_VFRXDCTL_ENABLE));
if (!wait_loop)
LOG_ERROR_BDF("RXDCTL.ENABLE queue %d not cleared while polling\n",
reg_idx);
l_end:
;
}
void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
{
u32 rxdctl;
u32 wait_loop = SXEVF_RING_WAIT_LOOP;
struct sxevf_adapter *adapter = hw->adapter;
rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
if (is_on) {
rxdctl |= SXEVF_RXDCTL_ENABLE | SXEVF_RXDCTL_VME;
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
do {
usleep_range(1000, 2000);
rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
} while (--wait_loop && !(rxdctl & SXEVF_RXDCTL_ENABLE));
} else {
rxdctl &= ~SXEVF_RXDCTL_ENABLE;
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
do {
usleep_range(1000, 2000);
rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
} while (--wait_loop && (rxdctl & SXEVF_RXDCTL_ENABLE));
}
SXEVF_WRITE_FLUSH(hw);
if (!wait_loop)
LOG_DEV_ERR("rx ring %u switch %u failed within\n"
"\tthe polling period\n", reg_idx, is_on);
}
void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx)
{
SXEVF_REG_WRITE(hw, SXE_VFRDBAL(reg_idx),
(desc_dma_addr & DMA_BIT_MASK(32)));
SXEVF_REG_WRITE(hw, SXE_VFRDBAH(reg_idx), (desc_dma_addr >> 32));
SXEVF_REG_WRITE(hw, SXE_VFRDLEN(reg_idx), desc_mem_len);
SXEVF_WRITE_FLUSH(hw);
SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0);
SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0);
}
void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
u32 header_buf_len, u32 pkg_buf_len, bool drop_en)
{
u32 srrctl = 0;
if (drop_en)
srrctl = SXEVF_SRRCTL_DROP_EN;
srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) &
SXEVF_SRRCTL_BSIZEHDR_MASK);
srrctl |= ((pkg_buf_len >> SXEVF_SRRCTL_BSIZEPKT_SHIFT) &
SXEVF_SRRCTL_BSIZEPKT_MASK);
SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl);
}
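/*
 * SRRCTL packing: the header buffer length is shifted up into the
 * BSIZEHDR field, while the packet buffer length is programmed in
 * coarse units (scaled down by SXEVF_SRRCTL_BSIZEPKT_SHIFT) rather
 * than bytes, with both fields masked to their register widths.
 */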
static void sxevf_tx_ring_info_get(struct sxevf_hw *hw, u8 idx, u32 *head,
u32 *tail)
{
*head = SXEVF_REG_READ(hw, SXE_VFTDH(idx));
*tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx));
}
static const struct sxevf_dma_operations sxevf_dma_ops = {
.tx_ring_desc_configure = sxevf_tx_ring_desc_configure,
.tx_writeback_off = sxevf_tx_writeback_off,
.tx_desc_thresh_set = sxevf_tx_desc_thresh_set,
.tx_ring_switch = sxevf_tx_ring_switch,
.tx_ring_info_get = sxevf_tx_ring_info_get,
.rx_disable = sxevf_rx_disable,
.rx_ring_switch = sxevf_rx_ring_switch,
.rx_ring_desc_configure = sxevf_rx_ring_desc_configure,
.rx_rcv_ctl_configure = sxevf_rx_rcv_ctl_configure,
};
#ifdef SXE_DPDK
void sxevf_32bit_counter_update(struct sxevf_hw *hw,
u32 reg, u64 *last, u64 *cur)
{
u32 latest = SXEVF_REG_READ(hw, reg);
*cur = (latest - *last) & UINT_MAX;
*last = latest;
}
void sxevf_36bit_counter_update(struct sxevf_hw *hw,
u32 lsb, u32 msb, u64 *last, u64 *cur)
{
u64 new_lsb = SXEVF_REG_READ(hw, lsb);
u64 new_msb = SXEVF_REG_READ(hw, msb);
u64 latest = ((new_msb << 32) | new_lsb);
*cur += (0x1000000000LL + latest - *last) & 0xFFFFFFFFFLL;
*last = latest;
}
#else
void sxevf_32bit_counter_update(struct sxevf_hw *hw,
u32 reg, u64 *last, u64 *cur)
{
u32 current_counter = SXEVF_REG_READ(hw, reg);
if (current_counter < *last)
*cur += 0x100000000LL;
*last = current_counter;
*cur &= 0xFFFFFFFF00000000LL;
*cur |= current_counter;
}
void sxevf_36bit_counter_update(struct sxevf_hw *hw,
u32 lsb, u32 msb, u64 *last, u64 *cur)
{
u64 current_counter_lsb = SXEVF_REG_READ(hw, lsb);
u64 current_counter_msb = SXEVF_REG_READ(hw, msb);
u64 current_counter = (current_counter_msb << 32) |
current_counter_lsb;
if (current_counter < *last)
*cur += 0x1000000000LL;
*last = current_counter;
*cur &= 0xFFFFFFF000000000LL;
*cur |= current_counter;
}
#endif
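/*
 * Wrap handling example for the 32-bit variant above: with *last at
 * 0xFFFFFFF0 and a fresh hardware reading of 0x00000010, the
 * current < *last test detects the rollover and 2^32 is added to the
 * accumulated 64-bit total before its low word is replaced with the
 * new reading.
 */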
void sxevf_packet_stats_get(struct sxevf_hw *hw, struct sxevf_hw_stats *stats)
{
sxevf_32bit_counter_update(hw, SXEVF_VFGPRC, &stats->last_vfgprc,
&stats->vfgprc);
sxevf_32bit_counter_update(hw, SXEVF_VFGPTC, &stats->last_vfgptc,
&stats->vfgptc);
sxevf_36bit_counter_update(hw, SXEVF_VFGORC_LSB, SXEVF_VFGORC_MSB,
&stats->last_vfgorc, &stats->vfgorc);
sxevf_36bit_counter_update(hw, SXEVF_VFGOTC_LSB, SXEVF_VFGOTC_MSB,
&stats->last_vfgotc, &stats->vfgotc);
sxevf_32bit_counter_update(hw, SXEVF_VFMPRC, &stats->last_vfmprc,
&stats->vfmprc);
}
void sxevf_stats_init_value_get(struct sxevf_hw *hw,
struct sxevf_hw_stats *stats)
{
stats->last_vfgprc = SXEVF_REG_READ(hw, SXE_VFGPRC);
stats->last_vfgorc = SXEVF_REG_READ(hw, SXE_VFGORC_LSB);
stats->last_vfgorc |=
(((u64)(SXEVF_REG_READ(hw, SXE_VFGORC_MSB))) << 32);
stats->last_vfgptc = SXEVF_REG_READ(hw, SXE_VFGPTC);
stats->last_vfgotc = SXEVF_REG_READ(hw, SXE_VFGOTC_LSB);
stats->last_vfgotc |=
(((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32);
stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC);
}
static const struct sxevf_stat_operations sxevf_stat_ops = {
.packet_stats_get = sxevf_packet_stats_get,
.stats_init_value_get = sxevf_stats_init_value_get,
};
static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring)
{
u32 rqpl = 0;
if (max_rx_ring > 1)
rqpl |= BIT(29);
SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, rqpl);
}
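/*
 * With more than one rx ring in use, bit 29 (rqpl) is set so received
 * packets can be spread across the rings; with a single ring the
 * VFPSRTYPE register is simply cleared.
 */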
static const struct sxevf_dbu_operations sxevf_dbu_ops = {
.rx_max_used_ring_set = sxevf_rx_max_used_ring_set,
};
static const struct sxevf_mbx_operations sxevf_mbx_ops = {
.mailbox_read = sxevf_mailbox_read,
.mailbox_write = sxevf_mailbox_write,
.msg_write = sxevf_msg_write,
.msg_read = sxevf_msg_read,
.pf_req_irq_trigger = sxevf_pf_req_irq_trigger,
.pf_ack_irq_trigger = sxevf_pf_ack_irq_trigger,
};
void sxevf_hw_ops_init(struct sxevf_hw *hw)
{
hw->setup.ops = &sxevf_setup_ops;
hw->irq.ops = &sxevf_irq_ops;
hw->mbx.ops = &sxevf_mbx_ops;
hw->dma.ops = &sxevf_dma_ops;
hw->stat.ops = &sxevf_stat_ops;
hw->dbu.ops = &sxevf_dbu_ops;
}
#ifdef SXE_DPDK
#define SXEVF_RSS_FIELD_MASK 0xffff0000
#define SXEVF_MRQC_RSSEN 0x00000001
#define SXEVF_RSS_KEY_SIZE (40)
#define SXEVF_MAX_RSS_KEY_ENTRIES (10)
#define SXEVF_MAX_RETA_ENTRIES (128)
void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
{
int i;
u32 vfsrrctl;
vfsrrctl = 0x100 << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT;
vfsrrctl |= 0x800 >> SXEVF_SRRCTL_BSIZEPKT_SHIFT;
SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, 0);
for (i = 0; i < 7; i++) {
SXEVF_REG_WRITE(hw, SXE_VFRDH(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFRDT(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(i), vfsrrctl);
SXEVF_REG_WRITE(hw, SXE_VFTDH(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFTDT(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFTDWBAH(i), 0);
SXEVF_REG_WRITE(hw, SXE_VFTDWBAL(i), 0);
}
SXEVF_WRITE_FLUSH(hw);
}
u32 sxevf_irq_cause_get(struct sxevf_hw *hw)
{
return SXEVF_REG_READ(hw, SXE_VFEICR);
}
void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx)
{
SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx),
(desc_dma_addr & DMA_BIT_MASK(32)));
SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
}
void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value);
}
void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw, u16 reg_index,
bool is_enable)
{
u32 vlnctrl;
vlnctrl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index));
if (is_enable)
vlnctrl |= SXEVF_RXDCTL_VME;
else
vlnctrl &= ~SXEVF_RXDCTL_VME;
SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), vlnctrl);
}
void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
u32 prefech_thresh, u32 host_thresh, u32 wb_thresh)
{
u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
txdctl |= (prefech_thresh & SXEVF_TXDCTL_THRESH_MASK);
txdctl |= ((host_thresh & SXEVF_TXDCTL_THRESH_MASK)
<< SXEVF_TXDCTL_HTHRESH_SHIFT);
txdctl |= ((wb_thresh & SXEVF_TXDCTL_THRESH_MASK)
<< SXEVF_TXDCTL_WTHRESH_SHIFT);
SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
}
void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value);
}
u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx)
{
return SXEVF_REG_READ(hw, SXE_VFRETA(reg_idx >> 2));
}
void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw, u16 reg_idx, u32 value)
{
SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value);
}
u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx)
{
u32 rss_key;
if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES)
rss_key = 0;
else
rss_key = SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx));
return rss_key;
}
u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw)
{
u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
return (mrqc & SXEVF_RSS_FIELD_MASK);
}
bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw)
{
bool rss_enable = false;
u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
if (mrqc & SXEVF_MRQC_RSSEN)
rss_enable = true;
return rss_enable;
}
void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key)
{
u32 i;
for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++)
SXEVF_REG_WRITE(hw, SXE_VFRSSRK(i), rss_key[i]);
}
void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on)
{
u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
if (is_on)
mrqc |= SXEVF_MRQC_RSSEN;
else
mrqc &= ~SXEVF_MRQC_RSSEN;
SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
}
void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
{
u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
mrqc &= ~SXEVF_RSS_FIELD_MASK;
mrqc |= rss_field;
SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
}
u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
const struct sxevf_reg_info *regs, u32 *reg_buf)
{
u32 j, i = 0;
int count = 0;
while (regs[i].count) {
for (j = 0; j < regs[i].count; j++) {
reg_buf[count + j] = SXEVF_REG_READ(hw,
regs[i].addr + j * regs[i].stride);
LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
regs[i].name, regs[i].addr, reg_buf[count + j]);
}
i++;
count += j;
}
return count;
}
#endif

View File

@ -0,0 +1,360 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_hw.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_HW_H__
#define __SXEVF_HW_H__
#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/if_ether.h>
#else
#include "sxe_compat_platform.h"
#ifdef SXE_HOST_DRIVER
#include "sxe_drv_type.h"
#endif
#endif
#include "sxevf_regs.h"
#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
#define SXE_PRIU64 "llu"
#define SXE_PRIX64 "llx"
#define SXE_PRID64 "lld"
#else
#define SXE_PRIU64 PRIU64
#define SXE_PRIX64 PRIX64
#define SXE_PRID64 PRID64
#endif
#define SXEVF_TXRX_RING_NUM_MAX 8
#define SXEVF_MAX_TXRX_DESC_POLL (10)
#define SXEVF_TX_DESC_PREFETCH_THRESH_32 (32)
#define SXEVF_TX_DESC_HOST_THRESH_1 (1)
#define SXEVF_TX_DESC_WRITEBACK_THRESH_8 (8)
#define SXEVF_TXDCTL_HTHRESH_SHIFT (8)
#define SXEVF_TXDCTL_WTHRESH_SHIFT (16)
#define SXEVF_TXDCTL_THRESH_MASK (0x7F)
#define SXEVF_RX_RING_POLL_MAX (10)
#define SXEVF_MAC_HDR_LEN_MAX (127)
#define SXEVF_NETWORK_HDR_LEN_MAX (511)
#define SXEVF_LINK_SPEED_UNKNOWN 0
#define SXEVF_LINK_SPEED_1GB_FULL 0x0020
#define SXEVF_LINK_SPEED_10GB_FULL 0x0080
#define SXEVF_LINK_SPEED_100_FULL 0x0008
#define SXEVF_VFT_TBL_SIZE (128)
#define SXEVF_HW_TXRX_RING_NUM_MAX (128)
#define SXEVF_VLAN_TAG_SIZE (4)
#define SXEVF_HW_UC_ENTRY_NUM_MAX 128
#define SXEVF_UDELAY(x) udelay(x)
enum {
SXEVF_LINK_TO_PHY = 0,
SXEVF_LINK_TO_DOWN,
SXEVF_LINK_TO_REINIT,
};
enum {
SXEVF_DIAG_TEST_PASSED = 0,
SXEVF_DIAG_TEST_BLOCKED = 1,
SXEVF_DIAG_REG_PATTERN_TEST_ERR = 2,
SXEVF_DIAG_CHECK_REG_TEST_ERR = 3,
};
struct sxevf_hw;
struct sxevf_hw_stats {
u64 base_vfgprc;
u64 base_vfgptc;
u64 base_vfgorc;
u64 base_vfgotc;
u64 base_vfmprc;
u64 last_vfgprc;
u64 last_vfgptc;
u64 last_vfgorc;
u64 last_vfgotc;
u64 last_vfmprc;
u64 vfgprc;
u64 vfgptc;
u64 vfgorc;
u64 vfgotc;
u64 vfmprc;
u64 saved_reset_vfgprc;
u64 saved_reset_vfgptc;
u64 saved_reset_vfgorc;
u64 saved_reset_vfgotc;
u64 saved_reset_vfmprc;
};
void sxevf_hw_ops_init(struct sxevf_hw *hw);
struct sxevf_setup_operations {
void (*reset)(struct sxevf_hw *hw);
void (*hw_stop)(struct sxevf_hw *hw);
s32 (*regs_test)(struct sxevf_hw *hw);
u32 (*link_state_get)(struct sxevf_hw *hw);
u32 (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size);
bool (*reset_done)(struct sxevf_hw *hw);
};
struct sxevf_hw_setup {
const struct sxevf_setup_operations *ops;
};
struct sxevf_irq_operations {
void (*pending_irq_clear)(struct sxevf_hw *hw);
void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx,
u32 interval);
void (*event_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx,
u32 value);
void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx,
u16 irq_idx);
void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx);
void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics);
void (*irq_enable)(struct sxevf_hw *hw, u32 mask);
void (*specific_irq_enable)(struct sxevf_hw *hw, u32 value);
void (*irq_disable)(struct sxevf_hw *hw);
void (*irq_off)(struct sxevf_hw *hw);
};
struct sxevf_irq_info {
const struct sxevf_irq_operations *ops;
};
struct sxevf_mbx_operations {
u32 (*mailbox_read)(struct sxevf_hw *hw);
void (*mailbox_write)(struct sxevf_hw *hw, u32 value);
void (*msg_write)(struct sxevf_hw *hw, u8 index, u32 msg);
u32 (*msg_read)(struct sxevf_hw *hw, u8 index);
void (*pf_req_irq_trigger)(struct sxevf_hw *hw);
void (*pf_ack_irq_trigger)(struct sxevf_hw *hw);
};
struct sxevf_mbx_stats {
u32 send_msgs;
u32 rcv_msgs;
u32 reqs;
u32 acks;
u32 rsts;
};
struct sxevf_mbx_info {
const struct sxevf_mbx_operations *ops;
struct sxevf_mbx_stats stats;
u32 msg_len;
u32 retry;
u32 interval;
u32 reg_value;
u32 api_version;
};
struct sxevf_dma_operations {
void (*tx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx);
void (*tx_writeback_off)(struct sxevf_hw *hw, u8 reg_idx);
void (*tx_desc_thresh_set)(struct sxevf_hw *hw, u8 reg_idx,
u32 wb_thresh, u32 host_thresh,
u32 prefech_thresh);
void (*tx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
void (*tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx, u32 *head,
u32 *tail);
void (*rx_disable)(struct sxevf_hw *hw, u8 reg_idx);
void (*rx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
void (*rx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx);
void (*rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
u32 header_buf_len, u32 pkg_buf_len, bool drop_en);
};
struct sxevf_dma_info {
const struct sxevf_dma_operations *ops;
};
struct sxevf_stat_operations {
void (*packet_stats_get)(struct sxevf_hw *hw, struct sxevf_hw_stats *stats);
void (*stats_init_value_get)(struct sxevf_hw *hw,
struct sxevf_hw_stats *stats);
};
struct sxevf_stat_info {
const struct sxevf_stat_operations *ops;
};
struct sxevf_dbu_operations {
void (*rx_max_used_ring_set)(struct sxevf_hw *hw, u16 max_rx_ring);
};
struct sxevf_dbu_info {
const struct sxevf_dbu_operations *ops;
};
enum sxevf_hw_state {
SXEVF_HW_STOP,
SXEVF_HW_FAULT,
};
struct sxevf_hw {
u8 __iomem *reg_base_addr;
void *adapter;
void *priv;
unsigned long state;
void (*fault_handle)(void *priv);
u32 (*reg_read)(const void *reg);
void (*reg_write)(u32 value, void *reg);
s32 board_type;
struct sxevf_hw_setup setup;
struct sxevf_irq_info irq;
struct sxevf_mbx_info mbx;
struct sxevf_dma_info dma;
struct sxevf_stat_info stat;
struct sxevf_dbu_info dbu;
};
struct sxevf_reg_info {
u32 addr;
u32 count;
u32 stride;
const s8 *name;
};
u16 sxevf_reg_dump_num_get(void);
void sxevf_hw_fault_handle(struct sxevf_hw *hw);
static inline bool sxevf_is_hw_fault(struct sxevf_hw *hw)
{
return test_bit(SXEVF_HW_FAULT, &hw->state);
}
static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw,
void (*handle)(void *),
void *priv)
{
hw->priv = priv;
hw->fault_handle = handle;
}
static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
u32 (*read)(const void *),
void (*write)(u32, void *))
{
hw->reg_read = read;
hw->reg_write = write;
}
#ifdef SXE_DPDK
void sxevf_irq_disable(struct sxevf_hw *hw);
void sxevf_hw_stop(struct sxevf_hw *hw);
void sxevf_hw_reset(struct sxevf_hw *hw);
void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg);
u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index);
u32 sxevf_mailbox_read(struct sxevf_hw *hw);
void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value);
void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw);
void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw);
void sxevf_rxtx_reg_init(struct sxevf_hw *hw);
void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask);
u32 sxevf_irq_cause_get(struct sxevf_hw *hw);
void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector);
void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx,
u16 vector);
void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx,
u32 interval);
void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx);
void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
u64 desc_dma_addr, u8 reg_idx);
void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
u32 header_buf_len, u32 pkg_buf_len,
bool drop_en);
void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value);
void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw, u16 reg_index,
bool is_enable);
void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
u32 prefech_thresh, u32 host_thresh,
u32 wb_thresh);
void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value);
void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value);
void sxevf_packet_stats_get(struct sxevf_hw *hw, struct sxevf_hw_stats *stats);
void sxevf_stats_init_value_get(struct sxevf_hw *hw,
struct sxevf_hw_stats *stats);
u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx);
void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw, u16 reg_idx, u32 value);
u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx);
u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw);
void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field);
void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on);
void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key);
bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw);
u32 sxevf_link_state_get(struct sxevf_hw *hw);
u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
const struct sxevf_reg_info *regs, u32 *reg_buf);
#endif
#endif

View File

@ -0,0 +1,808 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_ipsec.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifdef SXE_IPSEC_CONFIGURE
#include <net/xfrm.h>
#include <crypto/aead.h>
#include <linux/if_bridge.h>
#include "sxevf_ipsec.h"
#include "sxevf_tx_proc.h"
#include "sxevf_ring.h"
#include "sxevf_msg.h"
static const char ipsec_aes_name[] = "rfc4106(gcm(aes))";
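/* Ask the PF over the mailbox to program an SA; on ACK the PF returns
 * the hardware SA slot it used in msg.pf_sa_idx.
 */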
s32 sxevf_ipsec_sa_add(struct sxevf_adapter *adapter, struct xfrm_state *xs,
u32 *pf_sa_idx)
{
struct sxevf_ipsec_add_msg msg = {};
struct sxevf_hw *hw = &adapter->hw;
s32 ret;
msg.msg_type = SXEVF_IPSEC_ADD;
msg.flags = xs->xso.flags;
msg.spi = xs->id.spi;
msg.proto = xs->id.proto;
msg.family = xs->props.family;
if (xs->props.family == AF_INET6)
memcpy(msg.addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
else
memcpy(msg.addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
memcpy(msg.key, xs->aead->alg_key, sizeof(msg.key));
ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
SXEVF_MSG_NUM(sizeof(msg)));
if (!ret && (msg.msg_type == (SXEVF_IPSEC_ADD | SXEVF_MSGTYPE_ACK))) {
if (pf_sa_idx)
*pf_sa_idx = msg.pf_sa_idx;
LOG_INFO("xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf_sa_idx:%u\n",
xs->xso.flags, xs->id.spi, xs->id.proto,
xs->props.family, msg.pf_sa_idx);
} else {
LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
xs->xso.flags, xs->id.spi, xs->id.proto,
xs->props.family, ret);
}
return ret;
}
s32 sxevf_ipsec_sa_del(struct sxevf_adapter *adapter, u32 pf_sa_idx)
{
struct sxevf_ipsec_del_msg msg = {};
struct sxevf_hw *hw = &adapter->hw;
s32 ret;
msg.msg_type = SXEVF_IPSEC_DEL;
msg.sa_idx = pf_sa_idx;
ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
SXEVF_MSG_NUM(sizeof(msg)));
if (ret) {
LOG_ERROR("del pf sa:%d fail.(err:%d)\n", pf_sa_idx, ret);
goto l_end;
}
l_end:
return ret;
}
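/* Offload only when every state on the skb's security path is
 * hardware-offloaded, i.e. sp->len == sp->olen and both are non-zero.
 */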
static inline bool sxevf_need_tx_ipsec_offload(struct sk_buff *skb)
{
struct sec_path *sp = skb->sp;
bool ret = true;
if (!sp || !sp->olen || sp->len != sp->olen)
ret = false;
return ret;
}
static struct xfrm_state *
sxevf_ipsec_rx_sa_match(struct sxevf_ipsec_context *ipsec, __be32 spi, u8 proto,
__be32 *daddr, u8 daddr_len)
{
struct sxevf_rx_sa *sa = NULL;
struct xfrm_state *xs = NULL;
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->rx_table_list, sa, hlist,
(__force u32)spi) {
if (spi == sa->xs->id.spi && proto == sa->xs->id.proto &&
!memcmp(daddr, &sa->xs->id.daddr, daddr_len)) {
xs = sa->xs;
xfrm_state_hold(xs);
break;
}
}
rcu_read_unlock();
return xs;
}
static s32 sxevf_ipsec_tx_offload_param_valid(struct sk_buff *skb,
struct sxevf_tx_sa *sa,
u32 *vf_sa_idx,
struct xfrm_state **xfrm_state)
{
s32 ret = -SXEVF_ERR_ARGUMENT_INVALID;
u32 idx;
struct sec_path *path;
struct xfrm_state *xs;
path = skb_sec_path(skb);
if (unlikely(!path->len)) {
LOG_DEV_ERR("security path len:0 invalid.\n");
goto l_out;
}
xs = xfrm_input_state(skb);
if (unlikely(!xs)) {
LOG_DEV_ERR("security input xs NULL.\n");
goto l_out;
}
*xfrm_state = xs;
idx = xs->xso.offload_handle - SXEVF_IPSEC_TX_INDEX_BASE;
if (idx >= SXEVF_IPSEC_SA_CNT_MAX) {
LOG_DEV_ERR("invalid offload_handle:%lu idx:%d.\n",
xs->xso.offload_handle, idx);
goto l_out;
}
if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &sa[idx].status)) {
LOG_DEV_ERR("tx_table[%d] not used.\n", idx);
goto l_out;
}
*vf_sa_idx = idx;
LOG_INFO("vf_sa_idx:%u tx ipsec offload valid passed\n", *vf_sa_idx);
ret = 0;
l_out:
return ret;
}
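/* Fill the tx context descriptor for an IPsec frame: the SA index plus
 * ESP/encrypt tucmd flags, and for non-GSO ESP the trailer length,
 * presumably so the hardware can account for the ESP padding.
 */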
s32 sxevf_tx_ipsec_offload(struct sxevf_ring *tx_ring,
struct sxevf_tx_buffer *first,
struct sxevf_tx_context_desc *ctxt_desc)
{
u32 vf_sa_idx;
s32 ret = 0;
struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
struct sxevf_tx_sa *sa = ipsec->tx_table;
struct xfrm_state *xfrm_state = NULL;
u32 tucmd_ipsec = 0;
if (!sxevf_need_tx_ipsec_offload(first->skb)) {
LOG_DEBUG("ring[%u] no need offload IPsec.\n", tx_ring->idx);
goto l_out;
}
ret = sxevf_ipsec_tx_offload_param_valid(first->skb, sa, &vf_sa_idx,
&xfrm_state);
if (ret) {
LOG_ERROR("ring[%d ]tx ipsec valid failed.\n", tx_ring->idx);
goto l_out;
}
first->tx_features |= SXEVF_TX_FEATURE_IPSEC | SXEVF_TX_FEATURE_CSUM;
if (xfrm_state->id.proto == IPPROTO_ESP) {
tucmd_ipsec = SXEVF_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP |
SXEVF_TX_CTXTD_TUCMD_L4T_TCP;
if (first->protocol == htons(ETH_P_IP))
tucmd_ipsec |= SXEVF_TX_CTXTD_TUCMD_IPV4;
if (!skb_is_gso(first->skb)) {
const u32 auth_len =
SXEVF_IPSEC_AUTH_BIT_LEN / CHAR_BITS;
u8 pad_len;
ret = skb_copy_bits(first->skb,
first->skb->len -
SXEVF_IPSEC_PADLEN_OFFSET,
&pad_len, SXEVF_IPSEC_PADLEN_BYTE);
if (unlikely(ret)) {
LOG_ERROR("auth_len:%d offset:%d copy skb\n"
"\tfailed.(err:%d)\n",
auth_len,
first->skb->len -
SXEVF_IPSEC_PADLEN_OFFSET,
ret);
goto l_out;
}
tucmd_ipsec |= (SXEVF_IPSEC_PADLEN_OFFSET + pad_len);
}
}
if (sa[vf_sa_idx].encrypt)
tucmd_ipsec |= SXEVF_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN;
sxevf_ctxt_desc_sa_idx_set(ctxt_desc, vf_sa_idx);
sxevf_ctxt_desc_tucmd_set(ctxt_desc, tucmd_ipsec);
l_out:
return ret;
}
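/* On rx, parse the L3 header to reach the ESP/AH header, match the SPI
 * against the software SA table and, on a hit, attach the xfrm state to
 * the skb's security path so the stack skips software decryption.
 */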
void sxevf_rx_ipsec_proc(struct sxevf_ring *tx_ring,
union sxevf_rx_data_desc *desc, struct sk_buff *skb)
{
s32 ret = 0;
struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
__le16 pkt_info = desc->wb.lower.lo_dword.hs_rss.pkt_info;
struct iphdr *ip4_hdr = NULL;
struct ipv6hdr *ip6_hdr = NULL;
void *daddr = NULL;
unsigned long daddr_len;
u8 *sec_hdr = NULL;
struct xfrm_state *xs = NULL;
struct xfrm_offload *offload = NULL;
__be32 spi;
u8 proto;
if (!sxevf_status_err_check(desc, SXEVF_RXD_STAT_SECP)) {
LOG_DEBUG("not security packet, no need parse\n"
"\tsecurity header.\n");
goto l_out;
}
if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPV4)) {
ip4_hdr = (struct iphdr *)(skb->data + ETH_HLEN);
daddr = &ip4_hdr->daddr;
daddr_len = sizeof(ip4_hdr->daddr);
sec_hdr = (u8 *)ip4_hdr + ip4_hdr->ihl * SXEVF_IP_HEAD_LEN_UNIT;
} else if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPV6)) {
ip6_hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
daddr = &ip6_hdr->daddr;
daddr_len = sizeof(ip6_hdr->daddr);
sec_hdr = (u8 *)ip6_hdr + sizeof(struct ipv6hdr);
} else {
ret = -SXEVF_ERR_DEVICE_NOT_SUPPORTED;
LOG_ERROR("sxe security not support L3 protocol:0x%x.(err:%d)\n",
desc->wb.lower.lo_dword.hs_rss.pkt_info, ret);
goto l_out;
}
if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPSEC_ESP)) {
spi = ((struct ip_esp_hdr *)sec_hdr)->spi;
proto = IPPROTO_ESP;
} else if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPSEC_AH)) {
spi = ((struct ip_auth_hdr *)sec_hdr)->spi;
proto = IPPROTO_AH;
} else {
ret = -SXEVF_ERR_DEVICE_NOT_SUPPORTED;
LOG_ERROR("sxe security not support security protocol:0x%x.(err:%d)\n",
desc->wb.lower.lo_dword.hs_rss.pkt_info, ret);
goto l_out;
}
xs = sxevf_ipsec_rx_sa_match(ipsec, spi, proto, daddr,
(u8)daddr_len);
if (!xs) {
ret = -SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT;
LOG_ERROR("spi:0x%x, proto:0x%x daddr:%pI6 daddr_len:%lu\n"
"\tnot matched sw rx sa entry.(err:%d)",
spi, proto, daddr, daddr_len, ret);
goto l_out;
}
skb->sp = secpath_dup(skb->sp);
if (unlikely(!skb->sp)) {
LOG_INFO("skb security path null.\n");
goto l_out;
}
skb->sp->xvec[skb->sp->len++] = xs;
skb->sp->olen++;
offload = xfrm_offload(skb);
offload->flags = CRYPTO_DONE;
offload->status = CRYPTO_SUCCESS;
ipsec->rx_ipsec++;
l_out:
;
}
static bool sxevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
bool ret = true;
if (xs->props.family == AF_INET) {
if (ip_hdr(skb)->ihl != 5) {
LOG_ERROR("sxe ipsec offload unsupport ipv4\n"
"\theader with option, hdr len:%d.\n",
ip_hdr(skb)->ihl);
ret = false;
}
} else {
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) {
LOG_ERROR("sxe ipsec offload unsupport ipv6\n"
"\theader with ext hdr\n");
ret = false;
}
}
return ret;
}
static s32 sxevf_ipsec_param_valid(struct xfrm_state *xs)
{
s32 ret = -EINVAL;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
LOG_DEV_ERR("flags:%u offload:0x%lx unsupport\n"
"\tsecurity protol:0x%x.\n",
xs->xso.flags, xs->xso.offload_handle,
xs->id.proto);
goto l_out;
}
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
if (xs->calg) {
LOG_DEV_ERR("proto:%u flags:%u offload:0x%lx unsupport\n"
"\tcompression offload\n",
xs->id.proto, xs->xso.flags,
xs->xso.offload_handle);
goto l_out;
}
}
ret = 0;
LOG_INFO("proto:%u flags:%u offload:0x%lx ipsec param valid pass\n",
xs->id.proto, xs->xso.flags, xs->xso.offload_handle);
l_out:
return ret;
}
static s32 sxevf_ipsec_sa_idx_get(struct sxevf_ipsec_context *ipsec, bool is_rx)
{
s32 ret = -ENOSPC;
u16 i;
if (is_rx) {
if (ipsec->rx_sa_cnt == SXEVF_IPSEC_SA_CNT_MAX) {
LOG_ERROR("ipsec rx sa cnt reach limit:%u.\n",
SXEVF_IPSEC_SA_CNT_MAX);
goto l_out;
}
for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) {
if (!test_and_set_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->rx_table[i].status)) {
ret = i;
break;
}
}
} else {
if (ipsec->tx_sa_cnt == SXEVF_IPSEC_SA_CNT_MAX) {
LOG_ERROR("ipsec tx sa cnt reach limit:%u.\n",
SXEVF_IPSEC_SA_CNT_MAX);
goto l_out;
}
for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) {
if (!test_and_set_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->tx_table[i].status)) {
ret = i;
break;
}
}
}
l_out:
return ret;
}
static s32 sxevf_ipsec_key_salt_parse(struct xfrm_state *xs, u32 *key,
u32 *salt)
{
s32 ret = 0;
s8 *xs_key;
unsigned long len;
if (!xs->aead) {
ret = -EINVAL;
LOG_DEV_ERR("ipsec offload algorithm unsupport.(err:%d)\n", ret);
goto l_out;
}
if (xs->aead->alg_icv_len != SXEVF_IPSEC_AUTH_BIT_LEN) {
ret = -EINVAL;
LOG_DEV_ERR("ipsec offload icv len:%u\n"
"\tunsupport.(err:%d)\n",
xs->aead->alg_icv_len, ret);
goto l_out;
}
if (strcmp(xs->aead->alg_name, ipsec_aes_name)) {
ret = -EINVAL;
LOG_DEV_ERR("unsupport alg name:%s, just support alg:%s.(err:%d)\n",
xs->aead->alg_name, ipsec_aes_name, ret);
goto l_out;
}
xs_key = xs->aead->alg_key;
len = xs->aead->alg_key_len;
if (len == SXEVF_IPSEC_KEY_SALT_BIT_LEN) {
*salt = *(u32 *)(xs_key + SXEVF_IPSEC_KEY_BYTE_LEN);
} else if (len == SXEVF_IPSEC_KEY_BIT_LEN) {
*salt = 0;
} else {
ret = -EINVAL;
LOG_DEV_ERR("unsupport key_salt len:%lu.(err:%d)\n", len, ret);
goto l_out;
}
memcpy(key, xs_key, sizeof(u32) * SXEVF_IPSEC_KEY_LEN);
LOG_INFO("ipsec offload flag:0x%x key_salt len:%lu\n"
"\tsalt:%u key:0x%x%x%x%x.\n",
xs->xso.flags, len, *salt, key[0], key[1], key[2], key[3]);
l_out:
return ret;
}
static s32 sxevf_ipsec_rx_sa_entry_fill(struct xfrm_state *xs,
struct sxevf_rx_sa *sa_entry)
{
s32 ret;
memset(sa_entry, 0, sizeof(*sa_entry));
sa_entry->xs = xs;
if (xs->id.proto & IPPROTO_ESP)
sa_entry->decrypt = !!((xs->ealg) || (xs->aead));
ret = sxevf_ipsec_key_salt_parse(xs, sa_entry->key, &sa_entry->salt);
if (ret) {
LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n",
ret);
goto l_out;
}
if (xs->props.family == AF_INET6)
memcpy(sa_entry->ip_addr, &xs->id.daddr.a6, SXEVF_IPV6_ADDR_SIZE);
else
memcpy(&sa_entry->ip_addr[SXEVF_IPV4_ADDR_SIZE - 1],
&xs->id.daddr.a4, SXEVF_IPV4_ADDR_SIZE);
sa_entry->mode = SXEVF_IPSEC_RXMOD_VALID;
if (sa_entry->xs->id.proto & IPPROTO_ESP)
sa_entry->mode |= SXEVF_IPSEC_RXMOD_PROTO_ESP;
if (sa_entry->decrypt)
sa_entry->mode |= SXEVF_IPSEC_RXMOD_DECRYPT;
if (sa_entry->xs->props.family == AF_INET6)
sa_entry->mode |= SXEVF_IPSEC_RXMOD_IPV6;
l_out:
return ret;
}
static s32 sxevf_ipsec_sa_add_to_pf(struct sxevf_adapter *adapter,
struct xfrm_state *xs, u32 *pf_sa_idx)
{
s32 ret;
spin_lock_bh(&adapter->mbx_lock);
ret = sxevf_ipsec_sa_add(adapter, xs, pf_sa_idx);
spin_unlock_bh(&adapter->mbx_lock);
if (ret) {
LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
xs->xso.flags, xs->id.spi, xs->id.proto,
xs->props.family, ret);
}
return ret;
}
static s32 sxevf_ipsec_rx_xs_add(struct sxevf_adapter *adapter,
struct xfrm_state *xs)
{
struct sxevf_rx_sa sa_entry;
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
u32 vf_sa_idx;
s32 ret;
u32 pf_sa_idx;
ret = sxevf_ipsec_sa_idx_get(ipsec, true);
if (ret < 0) {
LOG_DEV_ERR("rx_sa_cnt:%d rx sa table no space.(err:%d)\n",
ipsec->rx_sa_cnt, ret);
goto l_out;
}
vf_sa_idx = (u32)ret;
sa_entry.status = ipsec->rx_table[vf_sa_idx].status;
ret = sxevf_ipsec_rx_sa_entry_fill(xs, &sa_entry);
if (ret) {
LOG_ERROR("ipsec offload param parse fail.(err:%d)\n", ret);
goto clear_used_xs;
}
ret = sxevf_ipsec_sa_add_to_pf(adapter, xs, &pf_sa_idx);
if (ret) {
LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
xs->xso.flags, xs->id.spi, xs->id.proto,
xs->props.family, ret);
goto clear_used_xs;
}
memcpy(&ipsec->rx_table[vf_sa_idx], &sa_entry, sizeof(sa_entry));
sa_entry.pf_sa_idx = pf_sa_idx;
xs->xso.offload_handle = vf_sa_idx + SXEVF_IPSEC_RX_INDEX_BASE;
ipsec->rx_sa_cnt++;
LOG_INFO("tx_sa_table[%u] add done pf_sa_idx:%u rx_sa_cnt:%u.\n",
vf_sa_idx, pf_sa_idx, ipsec->rx_sa_cnt);
l_out:
return ret;
clear_used_xs:
clear_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->rx_table[vf_sa_idx].status);
return ret;
}
static s32 sxevf_ipsec_tx_xs_add(struct sxevf_adapter *adapter,
struct xfrm_state *xs)
{
struct sxevf_tx_sa sa_entry;
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
u32 vf_sa_idx;
s32 ret;
u32 pf_sa_idx;
ret = sxevf_ipsec_sa_idx_get(ipsec, false);
if (ret < 0) {
LOG_DEV_ERR("tx_sa_cnt:%d tx sa table no space.(err:%d)\n",
ipsec->tx_sa_cnt, ret);
goto l_out;
}
vf_sa_idx = (u32)ret;
memset(&sa_entry, 0, sizeof(struct sxevf_tx_sa));
sa_entry.xs = xs;
sa_entry.status = ipsec->tx_table[vf_sa_idx].status;
if (xs->id.proto & IPPROTO_ESP)
sa_entry.encrypt = !!((xs->ealg) || (xs->aead));
ret = sxevf_ipsec_key_salt_parse(xs, sa_entry.key, &sa_entry.salt);
if (ret) {
LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n",
ret);
goto clear_used_xs;
}
ret = sxevf_ipsec_sa_add_to_pf(adapter, xs, &pf_sa_idx);
if (ret) {
LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
xs->xso.flags, xs->id.spi, xs->id.proto,
xs->props.family, ret);
goto clear_used_xs;
}
memcpy(&ipsec->tx_table[vf_sa_idx], &sa_entry, sizeof(sa_entry));
sa_entry.pf_sa_idx = pf_sa_idx;
xs->xso.offload_handle = vf_sa_idx + SXEVF_IPSEC_TX_INDEX_BASE;
ipsec->tx_sa_cnt++;
LOG_INFO("tx_sa_table[%u] add done pf_sa_idx:%u tx_sa_cnt:%u.\n",
vf_sa_idx, pf_sa_idx, ipsec->tx_sa_cnt);
l_out:
return ret;
clear_used_xs:
clear_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->tx_table[vf_sa_idx].status);
return ret;
}
static s32 sxevf_ipsec_state_add(struct xfrm_state *xs)
{
s32 ret;
struct net_device *net_dev = xs->xso.dev;
struct sxevf_adapter *adapter = netdev_priv(net_dev);
ret = sxevf_ipsec_param_valid(xs);
if (ret) {
LOG_ERROR("ipsec offload param invalid.(err:%d)\n", ret);
goto l_out;
}
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
ret = sxevf_ipsec_rx_xs_add(adapter, xs);
else
ret = sxevf_ipsec_tx_xs_add(adapter, xs);
if (ret) {
LOG_ERROR("offload_handle:%lu flag:0x%x sa add fail.(err:%d)\n",
xs->xso.offload_handle, xs->xso.flags, ret);
goto l_out;
}
l_out:
return ret;
}
static void sxevf_ipsec_state_delete(struct xfrm_state *xs)
{
struct net_device *netdev = xs->xso.dev;
struct sxevf_adapter *adapter = netdev_priv(netdev);
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
u32 vf_sa_idx;
u32 pf_sa_idx;
s32 ret;
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
vf_sa_idx = xs->xso.offload_handle - SXEVF_IPSEC_RX_INDEX_BASE;
pf_sa_idx = ipsec->rx_table[vf_sa_idx].pf_sa_idx;
if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->rx_table[vf_sa_idx].status)) {
LOG_DEV_ERR("vf_sa_idx:%d not in used, offload_handle: %lu.\n",
vf_sa_idx, xs->xso.offload_handle);
goto l_end;
}
spin_lock_bh(&adapter->mbx_lock);
ret = sxevf_ipsec_sa_del(adapter, pf_sa_idx);
spin_unlock_bh(&adapter->mbx_lock);
if (ret) {
LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x\n"
"\tflags:0x%x del fail.(err:%d)\n",
vf_sa_idx, pf_sa_idx, xs->xso.flags, ret);
goto l_end;
}
hash_del_rcu(&ipsec->rx_table[vf_sa_idx].hlist);
memset(&ipsec->rx_table[vf_sa_idx], 0,
sizeof(struct sxevf_rx_sa));
ipsec->rx_sa_cnt--;
} else {
vf_sa_idx = xs->xso.offload_handle - SXEVF_IPSEC_TX_INDEX_BASE;
pf_sa_idx = ipsec->tx_table[vf_sa_idx].pf_sa_idx;
if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED,
&ipsec->tx_table[vf_sa_idx].status)) {
LOG_DEV_ERR("vf_sa_idx:%d not in used, offload_handle: %lu.\n",
vf_sa_idx, xs->xso.offload_handle);
goto l_end;
}
spin_lock_bh(&adapter->mbx_lock);
ret = sxevf_ipsec_sa_del(adapter, pf_sa_idx);
spin_unlock_bh(&adapter->mbx_lock);
if (ret) {
LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x\n"
"\tflags:0x%x del fail.(err:%d)\n",
vf_sa_idx, pf_sa_idx, xs->xso.flags, ret);
goto l_end;
}
memset(&ipsec->tx_table[vf_sa_idx], 0,
sizeof(struct sxevf_tx_sa));
ipsec->tx_sa_cnt--;
}
LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x flags:0x%x del done.\n",
vf_sa_idx, pf_sa_idx, xs->xso.flags);
l_end:
;
}
static const struct xfrmdev_ops sxevf_xfrmdev_ops = {
.xdo_dev_offload_ok = sxevf_ipsec_offload_ok,
.xdo_dev_state_add = sxevf_ipsec_state_add,
.xdo_dev_state_delete = sxevf_ipsec_state_delete,
};
void sxevf_ipsec_offload_init(struct sxevf_adapter *adapter)
{
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
u32 size;
hash_init(ipsec->rx_table_list);
size = sizeof(struct sxevf_rx_sa) * SXEVF_IPSEC_SA_CNT_MAX;
ipsec->rx_table = kzalloc(size, GFP_KERNEL);
if (!ipsec->rx_table) {
LOG_DEV_ERR("ipsec rx sa table mem:%uB alloc fail.\n", size);
goto l_out;
}
size = sizeof(struct sxevf_tx_sa) * SXEVF_IPSEC_SA_CNT_MAX;
ipsec->tx_table = kzalloc(size, GFP_KERNEL);
if (!ipsec->tx_table) {
LOG_DEV_ERR("ipsec tx sa table mem:%uB alloc fail.\n", size);
goto l_free_rx_table;
}
ipsec->rx_sa_cnt = 0;
ipsec->tx_sa_cnt = 0;
adapter->netdev->xfrmdev_ops = &sxevf_xfrmdev_ops;
LOG_INFO("ipsec init done.\n");
l_out:
return;
l_free_rx_table:
SXEVF_KFREE(ipsec->rx_table);
}
void sxevf_ipsec_offload_exit(struct sxevf_adapter *adapter)
{
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
SXEVF_KFREE(ipsec->rx_table);
SXEVF_KFREE(ipsec->tx_table);
LOG_INFO("ipsec exit done.\n");
}
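/* After a function reset the PF side loses the VF's SA context, so
 * every table entry still marked in use is pushed to the PF again.
 */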
void sxevf_ipsec_restore(struct sxevf_adapter *adapter)
{
struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
struct sxevf_rx_sa *rx_sa;
struct sxevf_tx_sa *tx_sa;
u16 i;
s32 ret;
if (!(adapter->netdev->features & NETIF_F_HW_ESP))
goto l_end;
for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) {
rx_sa = &ipsec->rx_table[i];
tx_sa = &ipsec->tx_table[i];
if (test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &rx_sa->status)) {
ret = sxevf_ipsec_sa_add_to_pf(adapter, rx_sa->xs, NULL);
if (ret) {
LOG_DEV_ERR("rx xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
rx_sa->xs->xso.flags, rx_sa->xs->id.spi,
rx_sa->xs->id.proto,
rx_sa->xs->props.family, ret);
}
}
if (test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &tx_sa->status)) {
ret = sxevf_ipsec_sa_add_to_pf(adapter, tx_sa->xs, NULL);
if (ret) {
LOG_DEV_ERR("tx xfrm state flags:0x%x spi:0x%x proto:0x%x\n"
"\tfamily:0x%x add to pf fail.(err:%d)\n",
tx_sa->xs->xso.flags, tx_sa->xs->id.spi,
tx_sa->xs->id.proto,
tx_sa->xs->props.family, ret);
}
}
}
l_end:
;
}
#endif

View File

@ -0,0 +1,98 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_ipsec.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_IPSEC_H__
#define __SXEVF_IPSEC_H__
#include "sxevf_ring.h"
struct sxevf_adapter;
#define SXEVF_IPSEC_SA_CNT_MAX (1024)
#define SXEVF_IPSEC_RX_INDEX_BASE (0)
#define SXEVF_IPSEC_TX_INDEX_BASE (SXEVF_IPSEC_SA_CNT_MAX)
#define SXEVF_IPSEC_AUTH_BIT_LEN (128)
#define SXEVF_IPSEC_SA_ENTRY_USED (0x1)
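/* Offset from the packet end back to the ESP pad-length byte:
 * 16-byte ICV plus the 2-byte pad length/next header trailer.
 */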
#define SXEVF_IPSEC_PADLEN_OFFSET ((SXEVF_IPSEC_AUTH_BIT_LEN / 8) + 2)
#define SXEVF_IPSEC_PADLEN_BYTE (1)
#define SXEVF_IPSEC_IP_LEN (4)
#define SXEVF_IPSEC_KEY_LEN (4)
#define SXEVF_IPSEC_KEY_SALT_BIT_LEN (160)
#define SXEVF_IPSEC_KEY_BIT_LEN (128)
#define SXEVF_IPSEC_KEY_SALT_BYTE_LEN (SXEVF_IPSEC_KEY_SALT_BIT_LEN / 8)
#define SXEVF_IPSEC_KEY_BYTE_LEN (SXEVF_IPSEC_KEY_BIT_LEN / 8)
#define SXEVF_IPV4_ADDR_SIZE (4)
#define SXEVF_IPV6_ADDR_SIZE (16)
#define SXEVF_IPSEC_RXMOD_VALID 0x00000001
#define SXEVF_IPSEC_RXMOD_PROTO_ESP 0x00000004
#define SXEVF_IPSEC_RXMOD_DECRYPT 0x00000008
#define SXEVF_IPSEC_RXMOD_IPV6 0x00000010
#define SXEVF_IPSEC_RXTXMOD_VF 0x00000020
#define SXEVF_ESP_FEATURES \
(NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | NETIF_F_GSO_ESP)
struct sxevf_tx_sa {
struct xfrm_state *xs;
u32 key[SXEVF_IPSEC_KEY_LEN];
u32 salt;
u32 mode;
bool encrypt;
u32 pf_sa_idx;
unsigned long status;
};
struct sxevf_rx_sa {
struct hlist_node hlist;
struct xfrm_state *xs;
u32 key[SXEVF_IPSEC_KEY_LEN];
u32 salt;
__be32 ip_addr[SXEVF_IPSEC_IP_LEN];
u32 mode;
u32 pf_sa_idx;
bool decrypt;
unsigned long status;
};
struct sxevf_ipsec_context {
u16 rx_sa_cnt;
u16 tx_sa_cnt;
u64 rx_ipsec;
struct sxevf_rx_sa *rx_table;
struct sxevf_tx_sa *tx_table;
DECLARE_HASHTABLE(rx_table_list, 10);
};
void sxevf_ipsec_offload_init(struct sxevf_adapter *adapter);
void sxevf_ipsec_offload_exit(struct sxevf_adapter *adapter);
void sxevf_rx_ipsec_proc(struct sxevf_ring *tx_ring,
union sxevf_rx_data_desc *desc, struct sk_buff *skb);
s32 sxevf_tx_ipsec_offload(struct sxevf_ring *tx_ring,
struct sxevf_tx_buffer *first,
struct sxevf_tx_context_desc *ctxt_desc);
void sxevf_ipsec_restore(struct sxevf_adapter *adapter);
#endif

View File

@ -0,0 +1,885 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_irq.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/numa.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/ethtool.h>
#include "sxevf.h"
#ifdef HAVE_NO_OVERFLOW_H
#include <sxe_compat_overflow.h>
#else
#include <linux/overflow.h>
#endif
#include "sxevf_irq.h"
#include "sxe_log.h"
#include "sxevf_monitor.h"
#include "sxevf_rx_proc.h"
#include "sxevf_tx_proc.h"
#include "sxevf_netdev.h"
#ifdef NETIF_NAPI_ADD_API_NEED_3_PARAMS
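/* Kernels that dropped the weight argument from netif_napi_add() get a
 * wrapper so call sites can keep the 4-parameter form.
 */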
static inline void netif_napi_add_compat(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{
netif_napi_add(dev, napi, poll);
}
#define netif_napi_add(dev, napi, poll, weight) \
netif_napi_add_compat(dev, napi, poll, weight)
#endif
s32 sxevf_irq_coalesce_get(struct net_device *netdev,
struct ethtool_coalesce *user)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
u16 rx_itr = adapter->irq_ctxt.rx_irq_interval;
u16 tx_itr = adapter->irq_ctxt.tx_irq_interval;
struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[0];
bool is_mixed;
s32 ret = 0;
if (irq_data->tx.list.cnt && irq_data->rx.list.cnt)
is_mixed = true;
else
is_mixed = false;
if (rx_itr == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
user->rx_coalesce_usecs = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE;
else
user->rx_coalesce_usecs = rx_itr >> SXEVF_EITR_ITR_SHIFT;
if (is_mixed) {
LOG_INFO_BDF("interrupt 0 has both rx and tx ring,\n"
"\tjust report rx itr:%u.\n",
user->rx_coalesce_usecs);
goto l_out;
}
if (tx_itr == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
user->tx_coalesce_usecs = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE;
else
user->tx_coalesce_usecs = tx_itr >> SXEVF_EITR_ITR_SHIFT;
LOG_INFO_BDF("rx irq interval:%u tx irq interval:%u.\n", rx_itr,
tx_itr);
l_out:
return ret;
}
s32 sxevf_irq_coalesce_set(struct net_device *netdev,
struct ethtool_coalesce *user)
{
struct sxevf_adapter *adapter = netdev_priv(netdev);
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[0];
u16 tx_itr, tx_itr_old;
u16 rx_itr;
u8 i;
bool is_mixed;
bool need_rst = false;
u32 itr_max = SXEVF_EITR_ITR_MAX;
s32 ret = 0;
if (user->rx_coalesce_usecs > itr_max ||
user->tx_coalesce_usecs > itr_max) {
ret = -EINVAL;
LOG_ERROR_BDF("user param invalid, rx_coalesce_usecs:%u\n"
"\ttx_coalesce_usecs:%u max:%u.(err:%d)\n",
user->rx_coalesce_usecs, user->tx_coalesce_usecs,
itr_max, ret);
goto l_out;
}
if (irq_data->tx.list.cnt && irq_data->rx.list.cnt)
is_mixed = true;
else
is_mixed = false;
if (is_mixed) {
if (user->tx_coalesce_usecs) {
ret = -EINVAL;
LOG_ERROR_BDF("irq both has rx and rx ring, rx_coalesce_usecs:%u\n"
"\ttx_coalesce_usecs:%u invalid.(err:%d)\n",
user->rx_coalesce_usecs,
user->tx_coalesce_usecs, ret);
goto l_out;
}
tx_itr_old = adapter->irq_ctxt.rx_irq_interval;
} else {
tx_itr_old = adapter->irq_ctxt.rx_irq_interval;
}
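/* Values above the constant-mode sentinel are microseconds and are
 * shifted into EITR register units; 0 and 1 are stored unchanged.
 */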
if (user->rx_coalesce_usecs > SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
adapter->irq_ctxt.rx_irq_interval = user->rx_coalesce_usecs
<< SXEVF_EITR_ITR_SHIFT;
else
adapter->irq_ctxt.rx_irq_interval = user->rx_coalesce_usecs;
if (adapter->irq_ctxt.rx_irq_interval ==
SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
rx_itr = SXEVF_IRQ_INTERVAL_20K;
else
rx_itr = adapter->irq_ctxt.rx_irq_interval;
if (user->tx_coalesce_usecs > SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
adapter->irq_ctxt.tx_irq_interval = user->tx_coalesce_usecs
<< SXEVF_EITR_ITR_SHIFT;
else
adapter->irq_ctxt.tx_irq_interval = user->tx_coalesce_usecs;
if (adapter->irq_ctxt.tx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
tx_itr = SXEVF_IRQ_INTERVAL_12K;
else
tx_itr = adapter->irq_ctxt.tx_irq_interval;
if (is_mixed)
adapter->irq_ctxt.tx_irq_interval =
adapter->irq_ctxt.rx_irq_interval;
if (!!adapter->irq_ctxt.tx_irq_interval != !!tx_itr_old)
need_rst = true;
for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) {
irq_data = adapter->irq_ctxt.irq_data[i];
if (irq_data->tx.list.cnt && !irq_data->rx.list.cnt)
irq_data->irq_interval = tx_itr;
else
irq_data->irq_interval = rx_itr;
hw->irq.ops->ring_irq_interval_set(hw, i,
irq_data->irq_interval);
}
if (need_rst) {
if (netif_running(netdev))
sxevf_hw_reinit(adapter);
else
sxevf_reset(adapter);
}
LOG_INFO_BDF("user tx_coalesce_usecs:%u rx_coalesce_usecs:%u\n"
"\tadapter tx_irq_interval:%u rx_irq_interval:%u\n"
"\ttx_itr:%u rx_itr:%u need_rst:%u is_misxed:%u.\n",
user->tx_coalesce_usecs, user->rx_coalesce_usecs,
adapter->irq_ctxt.tx_irq_interval,
adapter->irq_ctxt.rx_irq_interval, tx_itr, rx_itr,
need_rst, is_mixed);
l_out:
return ret;
}
static void sxevf_irq_num_init(struct sxevf_adapter *adapter)
{
u16 ring_irq;
u16 cpu_cnt = num_online_cpus();
#ifdef PCI_MSIX_VEC_COUNT_CHECK
u16 msix_num =
pci_msix_vec_count(adapter->pdev) - SXEVF_NON_QUEUE_IRQ_NUM;
#endif
ring_irq = max(adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num);
ring_irq = min_t(u16, ring_irq, cpu_cnt);
#ifdef PCI_MSIX_VEC_COUNT_CHECK
ring_irq = min_t(u16, ring_irq, msix_num);
#endif
adapter->irq_ctxt.total_irq_num = ring_irq + SXEVF_NON_QUEUE_IRQ_NUM;
adapter->irq_ctxt.ring_irq_num = ring_irq;
LOG_INFO_BDF("msi-x interrupt rxr_num:%u txr_num:%u\n"
"\txdp_num:%u cpu cnt:%u\n"
"\ttotal_irq_num:%u ring_irq_num:%u\n",
adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num,
adapter->xdp_ring_ctxt.num, cpu_cnt,
adapter->irq_ctxt.total_irq_num,
adapter->irq_ctxt.ring_irq_num);
}
static s32 sxevf_msix_irq_init(struct sxevf_adapter *adapter)
{
u16 i;
s32 ret;
u16 total = adapter->irq_ctxt.total_irq_num;
adapter->irq_ctxt.msix_entries =
kcalloc(total, sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->irq_ctxt.msix_entries) {
ret = -ENOMEM;
LOG_ERROR_BDF("msi-x irq entry\n"
"\tnum:%u per size:%lu kcalloc fail.\n"
"\t(err:%d)\n",
total, sizeof(struct msix_entry), ret);
goto l_out;
}
for (i = 0; i < total; i++)
adapter->irq_ctxt.msix_entries[i].entry = i;
ret = pci_enable_msix_range(adapter->pdev,
adapter->irq_ctxt.msix_entries,
SXEVF_MIN_MSIX_IRQ_NUM, total);
if (ret < 0) {
SXEVF_KFREE(adapter->irq_ctxt.msix_entries);
LOG_DEV_ERR("min:%u max:%u pci enable msi-x failed.(err:%d)\n",
SXEVF_MIN_MSIX_IRQ_NUM, total, ret);
} else {
if (ret != total) {
adapter->irq_ctxt.total_irq_num = ret;
adapter->irq_ctxt.ring_irq_num = ret - SXEVF_NON_QUEUE_IRQ_NUM;
}
LOG_INFO_BDF("enable pci msi-x success.result:%d maxCnt:%u\n"
"\ttotal irq num:%u ring irq num:%u\n",
ret, total, adapter->irq_ctxt.total_irq_num,
adapter->irq_ctxt.ring_irq_num);
ret = 0;
}
l_out:
return ret;
}
static void sxevf_irq_data_free(struct sxevf_adapter *adapter, u16 irq_idx)
{
u16 idx;
struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx];
struct sxevf_ring *ring;
sxevf_for_each_ring(irq_data->tx.list) {
adapter->tx_ring_ctxt.ring[ring->idx] = NULL;
}
if (irq_data->tx.xdp_ring) {
idx = irq_data->tx.xdp_ring->idx;
adapter->xdp_ring_ctxt.ring[idx] = NULL;
}
sxevf_for_each_ring(irq_data->rx.list) {
adapter->rx_ring_ctxt.ring[ring->idx] = NULL;
}
adapter->irq_ctxt.irq_data[irq_idx] = NULL;
netif_napi_del(&irq_data->napi);
kfree_rcu(irq_data, rcu);
}
static void sxevf_all_irq_data_free(struct sxevf_adapter *adapter)
{
u16 irq_idx = adapter->irq_ctxt.ring_irq_num;
while (irq_idx--)
sxevf_irq_data_free(adapter, irq_idx);
}
static s32 sxevf_irq_data_alloc(struct sxevf_adapter *adapter, u16 total_count,
u16 irq_idx)
{
s32 ret = 0;
struct sxevf_irq_data *irq_data;
irq_data =
kzalloc(struct_size(irq_data, ring, total_count), GFP_KERNEL);
if (!irq_data) {
ret = -ENOMEM;
LOG_ERROR_BDF("alloc interrupt data and ring resource\n"
"\tfailed. size: %ld irq_idx:%u\n"
"\tring count:%u.(err:%d)\n",
struct_size(irq_data, ring, total_count), irq_idx,
total_count, ret);
goto l_out;
}
netif_napi_add(adapter->netdev, &irq_data->napi, sxevf_poll,
SXEVF_NAPI_WEIGHT);
adapter->irq_ctxt.irq_data[irq_idx] = irq_data;
irq_data->adapter = adapter;
irq_data->irq_idx = irq_idx;
l_out:
return ret;
}
static void sxevf_irq_interval_init(struct sxevf_irq_context *irq_ctxt,
u16 irq_idx, u16 txr_cnt, u16 rxr_cnt)
{
struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx];
if (txr_cnt && !rxr_cnt) {
if (irq_ctxt->tx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
irq_data->irq_interval = SXEVF_IRQ_INTERVAL_12K;
else
irq_data->irq_interval = irq_ctxt->tx_irq_interval;
} else {
if (irq_ctxt->rx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
irq_data->irq_interval = SXEVF_IRQ_INTERVAL_20K;
else
irq_data->irq_interval = irq_ctxt->rx_irq_interval;
}
irq_data->tx.irq_rate.irq_interval = SXEVF_LOWEST_LATENCY;
irq_data->rx.irq_rate.irq_interval = SXEVF_LOWEST_LATENCY;
LOG_INFO("irq_idx:%u irq level interval:%u\n"
"\tlist level rx irq interval:%u tx irq interval:%u\n",
irq_idx, irq_data->irq_interval,
irq_data->rx.irq_rate.irq_interval,
irq_data->tx.irq_rate.irq_interval);
}
static s32 sxevf_irq_ring_bind(struct sxevf_adapter *adapter)
{
s32 ret = 0;
u16 rxr_idx = 0;
u16 txr_idx = 0;
u16 xdp_idx = 0;
u16 irq_idx = 0;
u16 irq_num = adapter->irq_ctxt.ring_irq_num;
u16 rxr_remain = adapter->rx_ring_ctxt.num;
u16 txr_remain = adapter->tx_ring_ctxt.num;
u16 xdp_remain = adapter->xdp_ring_ctxt.num;
u16 total_ring = rxr_remain + txr_remain + xdp_remain;
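/* Two-phase distribution: when vectors outnumber rings, dedicate one
 * vector per rx ring first; the remaining vectors then share the
 * tx/xdp/rx rings round-robin.
 */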
if (irq_num >= total_ring) {
for (; rxr_remain > 0; irq_idx++, irq_num--) {
u16 rxr_cnt = DIV_ROUND_UP(rxr_remain, irq_num);
ret = sxevf_irq_data_alloc(adapter, rxr_cnt, irq_idx);
if (ret) {
LOG_ERROR_BDF("irq_num:%u rxr_remain:%u\n"
"\ttxr_remain:%u xdp_remain:%u\n"
"\tirq_idx:%u alloc rx irq\n"
"\tresource priority fail.(err:%d)\n",
irq_num, rxr_remain, txr_remain,
xdp_remain, irq_idx, ret);
goto l_error;
}
sxevf_irq_interval_init(&adapter->irq_ctxt, irq_idx, 0, 1);
sxevf_rx_ring_init(adapter, 0, 1, rxr_idx,
irq_idx, rxr_idx);
rxr_remain -= rxr_cnt;
rxr_idx += rxr_cnt;
}
LOG_INFO_BDF("alloc rx irq resource priority done.\n"
"\tirq_idx:%u rxr_idx:%u txr_remain:%u rxr_remain:%u\n"
"\txdp_remain:%u ring_irq_num:%u total_ring:%u\n",
irq_idx, rxr_idx, txr_remain, rxr_remain, xdp_remain,
irq_num, total_ring);
}
for (; irq_num; irq_idx++, irq_num--) {
u16 txr_cnt = DIV_ROUND_UP(txr_remain, irq_num);
u16 xdp_cnt = DIV_ROUND_UP(xdp_remain, irq_num);
u16 rxr_cnt = DIV_ROUND_UP(rxr_remain, irq_num);
u16 tx_reg_idx = txr_idx + xdp_idx;
u16 xdp_reg_idx = txr_cnt ? (tx_reg_idx + 1) : tx_reg_idx;
total_ring = txr_cnt + xdp_cnt + rxr_cnt;
LOG_DEBUG_BDF("irq_num:%u irq_idx:%u txr_cnt:%u xdp_cnt:%u\n"
"\trxr_cnt:%u base txr_idx:%u xdp_idx:%u\n"
"\trxr_idx:%u\n",
irq_num, irq_idx, txr_cnt, xdp_cnt, rxr_cnt,
txr_idx, xdp_idx, rxr_idx);
ret = sxevf_irq_data_alloc(adapter, total_ring, irq_idx);
if (ret) {
LOG_ERROR_BDF("irq_num:%u rxr_remain:%u txr_remain:%u\n"
"\txdp_remain:%u rxr_cnt:%u txr_cnt:%u\n"
"\txdp_cnt:%u ird_idx:%u alloc irq resource\n"
"\tfail.(err:%d)\n",
irq_num, rxr_remain, txr_remain, xdp_remain,
rxr_cnt, txr_cnt, xdp_cnt, irq_idx, ret);
goto l_error;
}
sxevf_irq_interval_init(&adapter->irq_ctxt, irq_idx, txr_cnt,
rxr_cnt);
sxevf_tx_ring_init(adapter, 0, txr_cnt, txr_idx,
irq_idx, tx_reg_idx);
sxevf_xdp_ring_init(adapter, txr_cnt, xdp_cnt,
xdp_idx, irq_idx, xdp_reg_idx);
sxevf_rx_ring_init(adapter, txr_cnt + xdp_cnt, rxr_cnt,
rxr_idx, irq_idx, rxr_idx);
txr_remain -= txr_cnt;
xdp_remain -= xdp_cnt;
rxr_remain -= rxr_cnt;
txr_idx += txr_cnt;
xdp_idx += xdp_cnt;
rxr_idx += rxr_cnt;
}
return ret;
l_error:
adapter->irq_ctxt.ring_irq_num = 0;
adapter->tx_ring_ctxt.num = 0;
adapter->rx_ring_ctxt.num = 0;
adapter->xdp_ring_ctxt.num = 0;
while (irq_idx--)
sxevf_irq_data_free(adapter, irq_idx);
return ret;
}
static void sxevf_pci_irq_disable(struct sxevf_adapter *adapter)
{
pci_disable_msix(adapter->pdev);
if (adapter->irq_ctxt.msix_entries) {
SXEVF_KFREE(adapter->irq_ctxt.msix_entries);
adapter->irq_ctxt.msix_entries = NULL;
}
}
void sxevf_hw_irq_disable(struct sxevf_adapter *adapter)
{
u16 i;
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_irq_context *irq = &adapter->irq_ctxt;
hw->irq.ops->irq_disable(hw);
for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++)
synchronize_irq(irq->msix_entries[i].vector);
synchronize_irq(irq->msix_entries[i].vector);
}
void sxevf_irq_release(struct sxevf_adapter *adapter)
{
u16 irq_idx;
struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
if (!irq_ctxt->msix_entries)
goto l_out;
for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) {
struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx];
struct msix_entry *entry = &irq_ctxt->msix_entries[irq_idx];
if (!irq_data->rx.list.next && !irq_data->tx.list.next &&
!irq_data->tx.xdp_ring)
continue;
free_irq(entry->vector, irq_data);
}
free_irq(irq_ctxt->msix_entries[irq_idx].vector, adapter);
l_out:
;
}
s32 sxevf_irq_ctxt_init(struct sxevf_adapter *adapter)
{
s32 ret;
adapter->irq_ctxt.rx_irq_interval = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE;
adapter->irq_ctxt.tx_irq_interval = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE;
sxevf_irq_num_init(adapter);
ret = sxevf_msix_irq_init(adapter);
if (ret) {
LOG_DEV_ERR("msix irq init fail.(err:%d)\n", ret);
goto l_out;
}
ret = sxevf_irq_ring_bind(adapter);
if (ret) {
LOG_DEV_ERR("interrupt and ring bind fail.(err:%d)\n", ret);
goto l_disable_irq;
}
LOG_INFO_BDF("adapter rx_irq_interval:%u tx_irq_interval:%u.\n",
adapter->irq_ctxt.rx_irq_interval,
adapter->irq_ctxt.tx_irq_interval);
l_out:
return ret;
l_disable_irq:
sxevf_pci_irq_disable(adapter);
return ret;
}
void sxevf_irq_ctxt_exit(struct sxevf_adapter *adapter)
{
sxevf_all_irq_data_free(adapter);
sxevf_pci_irq_disable(adapter);
adapter->irq_ctxt.ring_irq_num = 0;
adapter->tx_ring_ctxt.num = 0;
adapter->rx_ring_ctxt.num = 0;
adapter->xdp_ring_ctxt.num = 0;
}
static bool sxevf_set_irq_name(struct sxevf_irq_data *irq_data, char *dev_name,
u16 *rx_idx, u16 *tx_idx)
{
if (irq_data->tx.list.next && irq_data->rx.list.next) {
snprintf(irq_data->name, sizeof(irq_data->name), "%s-TxRx-%u",
dev_name, (*rx_idx)++);
(*tx_idx)++;
} else if (irq_data->rx.list.next) {
snprintf(irq_data->name, sizeof(irq_data->name), "%s-Rx-%u",
dev_name, (*rx_idx)++);
} else if (irq_data->tx.list.next || irq_data->tx.xdp_ring) {
snprintf(irq_data->name, sizeof(irq_data->name), "%s-Tx-%u",
dev_name, (*tx_idx)++);
} else {
LOG_INFO("%u irq has no ring bind.\n", irq_data->irq_idx);
return false;
}
return true;
}
static irqreturn_t sxevf_ring_irq_handler(int irq, void *data)
{
struct sxevf_irq_data *irq_data = data;
if (irq_data->tx.list.next || irq_data->rx.list.next ||
irq_data->tx.xdp_ring)
napi_schedule_irqoff(&irq_data->napi);
return IRQ_HANDLED;
}
static irqreturn_t sxevf_event_irq_handler(int irq, void *data)
{
struct sxevf_adapter *adapter = data;
struct sxevf_hw *hw = &adapter->hw;
set_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state);
sxevf_monitor_work_schedule(adapter);
hw->irq.ops->specific_irq_enable(hw, adapter->irq_ctxt.mailbox_irq);
LOG_INFO_BDF("rcv event irq:%d\n", irq);
return IRQ_HANDLED;
}
static s32 sxevf_msix_request_irqs(struct sxevf_adapter *adapter)
{
s32 ret;
u16 rx_idx = 0;
u16 tx_idx = 0;
u16 irq_idx;
struct sxevf_irq_data *irq_data;
struct msix_entry *entry;
struct net_device *netdev = adapter->netdev;
struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
if (!irq_ctxt->ring_irq_num) {
ret = -SXEVF_ERR_IRQ_NUM_INVALID;
LOG_ERROR_BDF("irq_num:%d request irq fail,\n"
"\tinvalid retry open\n"
"\tneed reload ko.(err:%d)\n",
irq_ctxt->ring_irq_num, ret);
goto l_out;
}
for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) {
irq_data = irq_ctxt->irq_data[irq_idx];
entry = &irq_ctxt->msix_entries[irq_idx];
if (!sxevf_set_irq_name(irq_data, netdev->name,
&rx_idx, &tx_idx))
continue;
ret = request_irq(entry->vector, &sxevf_ring_irq_handler, 0,
irq_data->name, irq_data);
if (ret) {
LOG_DEV_ERR("irq_idx:%u rx_idx:%u tx_idx:%u irq_num:%u\n"
"\tvector:%u msi-x ring interrupt\n"
"\trequest fail.(err:%d)\n",
irq_idx, rx_idx, tx_idx,
irq_ctxt->ring_irq_num, entry->vector, ret);
goto l_free_irq;
}
}
ret = request_irq(irq_ctxt->msix_entries[irq_idx].vector,
sxevf_event_irq_handler, 0, netdev->name, adapter);
if (ret) {
LOG_DEV_ERR("irq_idx:%u vector:%u msi-x other interrupt\n"
"\trequest fail.(err:%d)\n",
irq_idx, irq_ctxt->msix_entries[irq_idx].vector,
ret);
goto l_free_irq;
}
l_out:
return ret;
l_free_irq:
while (irq_idx) {
irq_idx--;
free_irq(irq_ctxt->msix_entries[irq_idx].vector,
irq_ctxt->irq_data[irq_idx]);
}
return ret;
}
void sxevf_configure_msix_hw(struct sxevf_adapter *adapter)
{
u16 irq_idx;
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_ring *ring;
struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
irq_ctxt->irq_mask = 0;
for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) {
struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx];
sxevf_for_each_ring(irq_data->rx.list) {
hw->irq.ops->ring_irq_map(hw, false, ring->reg_idx,
irq_idx);
}
sxevf_for_each_ring(irq_data->tx.list) {
hw->irq.ops->ring_irq_map(hw, true, ring->reg_idx,
irq_idx);
}
if (irq_data->tx.xdp_ring) {
hw->irq.ops->ring_irq_map(hw, true,
irq_data->tx.xdp_ring->reg_idx,
irq_idx);
}
hw->irq.ops->ring_irq_interval_set(hw, irq_idx,
irq_data->irq_interval);
irq_ctxt->irq_mask |= BIT(irq_idx);
}
irq_ctxt->mailbox_irq = BIT(irq_idx);
irq_ctxt->irq_mask |= BIT(irq_idx);
hw->irq.ops->event_irq_map(hw, irq_idx);
}
static void sxevf_napi_enable_all(struct sxevf_adapter *adapter)
{
u16 irq_idx;
for (irq_idx = 0; irq_idx < adapter->irq_ctxt.ring_irq_num; irq_idx++)
napi_enable(&adapter->irq_ctxt.irq_data[irq_idx]->napi);
}
void sxevf_napi_disable(struct sxevf_adapter *adapter)
{
u16 irq_idx;
for (irq_idx = 0; irq_idx < adapter->irq_ctxt.ring_irq_num; irq_idx++)
napi_disable(&adapter->irq_ctxt.irq_data[irq_idx]->napi);
}
void sxevf_hw_irq_configure(struct sxevf_adapter *adapter)
{
struct sxevf_hw *hw = &adapter->hw;
sxevf_configure_msix_hw(adapter);
/* in order to force CPU ordering */
smp_mb__before_atomic();
clear_bit(SXEVF_DOWN, &adapter->state);
sxevf_napi_enable_all(adapter);
hw->irq.ops->pending_irq_clear(hw);
hw->irq.ops->irq_enable(hw, adapter->irq_ctxt.irq_mask);
}
s32 sxevf_irq_configure(struct sxevf_adapter *adapter)
{
s32 ret;
ret = sxevf_msix_request_irqs(adapter);
if (ret) {
LOG_DEV_ERR("irq_num:%d msi-x request irq failed, (err:%d)\n",
adapter->irq_ctxt.ring_irq_num, ret);
goto l_out;
}
sxevf_hw_irq_configure(adapter);
l_out:
return ret;
}
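/* Reclassify the per-direction latency bucket from the byte rate seen
 * since the last update: lowest -> low -> bulk as throughput grows.
 */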
static void sxevf_irq_interval_update(struct sxevf_irq_data *irq_data,
struct sxevf_irq_rate *rate)
{
u32 bytes = rate->total_bytes;
u32 packets = rate->total_packets;
u16 old_irq_itr = irq_data->irq_interval >> SXEVF_EITR_ITR_SHIFT;
u64 bytes_rate;
u16 itr = rate->irq_interval;
if (packets == 0 || old_irq_itr == 0)
goto l_end;
bytes_rate = bytes / old_irq_itr;
switch (itr) {
case SXEVF_LOWEST_LATENCY:
if (bytes_rate > SXEVF_LOW_LATENCY_BYTE_RATE_MIN)
itr = SXEVF_LOW_LATENCY;
break;
case SXEVF_LOW_LATENCY:
if (bytes_rate > SXEVF_BULK_LATENCY_BYTE_RATE_MIN)
itr = SXEVF_BULK_LATENCY;
else if (bytes_rate <= SXEVF_LOW_LATENCY_BYTE_RATE_MIN)
itr = SXEVF_LOWEST_LATENCY;
break;
case SXEVF_BULK_LATENCY:
if (bytes_rate <= SXEVF_BULK_LATENCY_BYTE_RATE_MIN)
itr = SXEVF_LOW_LATENCY;
break;
}
rate->total_bytes = 0;
rate->total_packets = 0;
rate->irq_interval = itr;
l_end:
;
}
static void sxevf_irq_rate_adjust(struct sxevf_irq_data *irq_data)
{
u16 curr_itr;
u16 new_itr = irq_data->irq_interval;
struct sxevf_irq_rate *tx_rate = &irq_data->tx.irq_rate;
struct sxevf_irq_rate *rx_rate = &irq_data->rx.irq_rate;
struct sxevf_adapter *adapter = irq_data->adapter;
struct sxevf_hw *hw = &adapter->hw;
if (irq_data->tx.list.cnt)
sxevf_irq_interval_update(irq_data, tx_rate);
if (irq_data->rx.list.cnt)
sxevf_irq_interval_update(irq_data, rx_rate);
curr_itr = max(tx_rate->irq_interval, rx_rate->irq_interval);
switch (curr_itr) {
case SXEVF_LOWEST_LATENCY:
new_itr = SXEVF_IRQ_INTERVAL_100K;
break;
case SXEVF_LOW_LATENCY:
new_itr = SXEVF_IRQ_INTERVAL_20K;
break;
case SXEVF_BULK_LATENCY:
new_itr = SXEVF_IRQ_INTERVAL_12K;
break;
}
if (new_itr != irq_data->irq_interval) {
new_itr = (10 * new_itr * irq_data->irq_interval) /
((9 * new_itr) + irq_data->irq_interval);
irq_data->irq_interval = new_itr;
hw->irq.ops->ring_irq_interval_set(hw, irq_data->irq_idx,
irq_data->irq_interval);
}
}
s32 sxevf_poll(struct napi_struct *napi, int weight)
{
struct sxevf_irq_data *irq_data =
container_of(napi, struct sxevf_irq_data, napi);
struct sxevf_adapter *adapter = irq_data->adapter;
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_ring *ring;
s32 per_ring_budget;
s32 total_cleaned = 0;
bool clean_complete = true;
u32 cleaned = 0;
sxevf_for_each_ring(irq_data->tx.list) {
if (!sxevf_tx_ring_irq_clean(irq_data, ring, weight))
clean_complete = false;
}
ring = irq_data->tx.xdp_ring;
if (ring) {
if (!sxevf_xdp_ring_irq_clean(irq_data, ring, weight))
clean_complete = false;
}
if (weight <= 0)
return weight;
per_ring_budget = max(weight / irq_data->rx.list.cnt, 1);
LOG_DEBUG_BDF("weight:%d rings in irq=%u, per_ring_budget=%d\n", weight,
irq_data->rx.list.cnt, per_ring_budget);
sxevf_for_each_ring(irq_data->rx.list) {
cleaned = sxevf_rx_ring_irq_clean(irq_data, ring,
per_ring_budget);
total_cleaned += cleaned;
if (cleaned >= per_ring_budget)
clean_complete = false;
}
if (!clean_complete) {
LOG_WARN_BDF("weight:%d cleand:%u total_cleaned:%d\n"
"\tper_ring_budget:%d not complete\n",
weight, cleaned, total_cleaned, per_ring_budget);
return weight;
}
if (likely(napi_complete_done(napi, total_cleaned))) {
LOG_INFO_BDF("weight:%d cleand:%u total_cleaned:%d\n"
"\tper_ring_budget:%d complete done\n",
weight, cleaned, total_cleaned, per_ring_budget);
if (adapter->irq_ctxt.rx_irq_interval ==
SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE)
sxevf_irq_rate_adjust(irq_data);
if (!test_bit(SXEVF_DOWN, &adapter->state))
hw->irq.ops->specific_irq_enable(hw, BIT_ULL(irq_data->irq_idx));
}
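/* never return the full budget here: napi_complete_done() has already
 * run, and returning weight would signal unfinished work
 */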
return min(total_cleaned, weight - 1);
}

View File

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_irq.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_IRQ_H__
#define __SXEVF_IRQ_H__
#include "sxevf_ring.h"
struct ethtool_coalesce;
#define SXEVF_NON_QUEUE_IRQ_NUM (1)
#define SXEVF_NAPI_WEIGHT (64)
#define SXEVF_MAX_MSIX_IRQ_NUM (2)
#define SXEVF_MIN_QUEUE_IRQ_NUM (1)
#define SXEVF_MAX_QUEUE_IRQ_NUM (SXEVF_MAX_MSIX_IRQ_NUM)
#define SXEVF_MIN_MSIX_IRQ_NUM \
(SXEVF_NON_QUEUE_IRQ_NUM + SXEVF_MIN_QUEUE_IRQ_NUM)
#define SXEVF_IRQ_INTERVAL_12K (336)
#define SXEVF_IRQ_INTERVAL_20K (200)
#define SXEVF_IRQ_INTERVAL_100K (40)
#define SXEVF_IRQ_NAME_EXT_LEN (16)
#define SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE (1)
enum {
SXEVF_LOWEST_LATENCY = 0,
SXEVF_LOW_LATENCY,
SXEVF_BULK_LATENCY,
SXEVF_LATENCY_NR = 255,
};
#define SXEVF_LOW_LATENCY_BYTE_RATE_MIN 10
#define SXEVF_BULK_LATENCY_BYTE_RATE_MIN 20
struct sxevf_irq_rate {
unsigned long next_update;
unsigned int total_bytes;
unsigned int total_packets;
u16 irq_interval;
};
struct sxevf_list {
struct sxevf_ring *next;
u8 cnt;
};
struct sxevf_tx_context {
struct sxevf_list list;
struct sxevf_ring *xdp_ring;
struct sxevf_irq_rate irq_rate;
u16 work_limit;
};
struct sxevf_rx_context {
struct sxevf_list list;
struct sxevf_irq_rate irq_rate;
};
struct sxevf_irq_data {
struct sxevf_adapter *adapter;
u16 irq_idx;
u16 irq_interval;
struct sxevf_tx_context tx;
struct sxevf_rx_context rx;
struct napi_struct napi;
struct rcu_head rcu;
s8 name[IFNAMSIZ + SXEVF_IRQ_NAME_EXT_LEN];
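/* ring storage is allocated inline with the vector via struct_size()
 * in sxevf_irq_data_alloc()
 */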
struct sxevf_ring ring[0] ____cacheline_internodealigned_in_smp;
};
struct sxevf_irq_context {
struct msix_entry *msix_entries;
struct sxevf_irq_data *irq_data[SXEVF_MAX_QUEUE_IRQ_NUM];
u16 ring_irq_num;
u16 total_irq_num;
u16 rx_irq_interval;
u16 tx_irq_interval;
u32 irq_mask;
u32 mailbox_irq;
};
s32 sxevf_poll(struct napi_struct *napi, int weight);
void sxevf_irq_ctxt_exit(struct sxevf_adapter *adapter);
s32 sxevf_irq_ctxt_init(struct sxevf_adapter *adapter);
void sxevf_irq_release(struct sxevf_adapter *adapter);
void sxevf_hw_irq_configure(struct sxevf_adapter *adapter);
s32 sxevf_irq_configure(struct sxevf_adapter *adapter);
void sxevf_hw_irq_disable(struct sxevf_adapter *adapter);
void sxevf_napi_disable(struct sxevf_adapter *adapter);
void sxevf_configure_msix_hw(struct sxevf_adapter *adapter);
s32 sxevf_irq_coalesce_set(struct net_device *netdev,
struct ethtool_coalesce *user);
s32 sxevf_irq_coalesce_get(struct net_device *netdev,
struct ethtool_coalesce *user);
#endif

View File

@ -0,0 +1,783 @@
// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_main.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "sxe_version.h"
#include "sxe_log.h"
#include "sxevf_netdev.h"
#include "sxevf.h"
#include "sxevf_pci.h"
#include "sxevf_ring.h"
#include "sxevf_irq.h"
#include "sxevf_msg.h"
#define SXEVF_MSG_LEVEL_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define SXEVF_WAIT_RST_DONE_TIMES 200
static struct workqueue_struct *sxevf_wq;
struct net_device *g_netdev;
void sxevf_start_adapter(struct sxevf_adapter *adapter)
{
ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr,
adapter->mac_filter_ctxt.def_uc_addr);
clear_bit(SXEVF_HW_STOP, &adapter->hw.state);
}
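/* Reset handshake: stop the hw, trigger the VF reset, poll for the PF
 * ack and then for VFLR completion, and finally exchange the RESET
 * mailbox message to learn the assigned MAC, MTU and mc filter type.
 */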
s32 sxevf_dev_reset(struct sxevf_hw *hw)
{
u32 retry = SXEVF_RST_CHECK_NUM;
s32 ret;
struct sxevf_rst_msg msg = {};
struct sxevf_adapter *adapter = hw->adapter;
set_bit(SXEVF_HW_STOP, &hw->state);
hw->setup.ops->hw_stop(hw);
adapter->mbx_version = SXEVF_MBX_API_10;
hw->setup.ops->reset(hw);
if (hw->board_type == SXE_BOARD_VF_HV)
retry = SXEVF_RST_CHECK_NUM_HV;
while (!sxevf_pf_rst_check(hw) && retry) {
retry--;
SXEVF_UDELAY(5);
}
if (!retry) {
ret = -SXEVF_ERR_RESET_FAILED;
LOG_ERROR_BDF("retry use up, pf has not reset done.(err:%d)\n",
ret);
goto l_out;
}
retry = SXEVF_WAIT_RST_DONE_TIMES;
while (!hw->setup.ops->reset_done(hw) && retry) {
retry--;
msleep(50);
}
if (!retry) {
ret = -SXEVF_ERR_RESET_FAILED;
LOG_ERROR_BDF("retry use up,\n"
"\tvflr has not reset done.(err:%d)\n",
ret);
goto l_out;
}
hw->mbx.retry = SXEVF_MBX_RETRY_COUNT;
msg.msg_type = SXEVF_RESET;
ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
SXEVF_MSG_NUM(sizeof(msg)));
if (ret) {
LOG_ERROR_BDF("vf reset msg:%d len:%zu mailbox fail.(err:%d)\n",
msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), ret);
goto l_out;
}
sxevf_sw_mtu_set(adapter, msg.sw_mtu);
if (msg.msg_type == (SXEVF_RESET | SXEVF_MSGTYPE_ACK)) {
ether_addr_copy(adapter->mac_filter_ctxt.def_uc_addr,
(u8 *)(msg.mac_addr));
} else if (msg.msg_type != (SXEVF_RESET | SXEVF_MSGTYPE_NACK)) {
ret = -SXEVF_ERR_MAC_ADDR_INVALID;
LOG_ERROR_BDF("pf handle vf reset msg fail, rcv msg:0x%x.(err:%d)\n",
msg.msg_type, ret);
goto l_out;
}
adapter->mac_filter_ctxt.mc_filter_type = msg.mc_fiter_type;
LOG_INFO_BDF("vf get mc filter type:%d default mac addr:%pM from pf\n"
"\tsw_mtu:%u.\n",
adapter->mac_filter_ctxt.mc_filter_type,
adapter->mac_filter_ctxt.def_uc_addr, msg.sw_mtu);
l_out:
return ret;
}
static int sxevf_config_dma_mask(struct sxevf_adapter *adapter)
{
int ret = 0;
if (dma_set_mask_and_coherent(&adapter->pdev->dev,
DMA_BIT_MASK(SXEVF_DMA_BIT_WIDTH_64))) {
LOG_ERROR_BDF("device[pci_id %u] 64 dma mask\n"
"\tand coherent set failed\n",
adapter->pdev->dev.id);
ret = dma_set_mask_and_coherent(&adapter->pdev->dev,
DMA_BIT_MASK(SXEVF_DMA_BIT_WIDTH_32));
if (ret) {
LOG_DEV_ERR("device[pci_id %u] 32 dma mask\n"
"\tand coherent set failed\n",
adapter->pdev->dev.id);
}
}
return ret;
}
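/* Negotiate the mailbox API with the PF, proposing versions from
 * newest to oldest until one is ACKed.
 */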
void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter)
{
s32 ret;
struct sxevf_hw *hw = &adapter->hw;
static const int api[] = { SXEVF_MBX_API_14, SXEVF_MBX_API_13,
SXEVF_MBX_API_12, SXEVF_MBX_API_11,
SXEVF_MBX_API_10, SXEVF_MBX_API_NR };
u32 idx = 0;
struct sxevf_mbx_api_msg msg;
spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != SXEVF_MBX_API_NR) {
msg.msg_type = SXEVF_API_NEGOTIATE;
msg.api_version = api[idx];
ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
SXEVF_MSG_NUM(sizeof(msg)));
if (!ret && (msg.msg_type ==
(SXEVF_API_NEGOTIATE | SXEVF_MSGTYPE_ACK))) {
adapter->mbx_version = api[idx];
break;
}
idx++;
}
spin_unlock_bh(&adapter->mbx_lock);
LOG_INFO_BDF("mbx version:%u.\n", adapter->mbx_version);
}
static int sxevf_pci_init(struct sxevf_adapter *adapter)
{
int ret;
size_t len;
resource_size_t bar_base_paddr;
struct pci_dev *pdev = adapter->pdev;
ret = pci_enable_device(pdev);
if (ret) {
LOG_ERROR_BDF("device[pci_id %u] pci enable failed\n", pdev->dev.id);
goto l_pci_enable_device_mem_failed;
}
ret = pci_request_regions(pdev, SXEVF_DRV_NAME);
if (ret) {
LOG_DEV_ERR("device[pci_id %u] request IO memory failed\n",
pdev->dev.id);
goto l_pci_request_mem_failed;
}
pci_set_master(pdev);
pci_save_state(pdev);
bar_base_paddr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
adapter->hw.reg_base_addr = ioremap(bar_base_paddr, len);
if (!adapter->hw.reg_base_addr) {
ret = -EIO;
LOG_ERROR_BDF("device[pci_id %u]\n"
"\tioremap[bar_base_paddr = 0x%llx, len = %zu]\n"
"\tfailed\n",
pdev->dev.id, (u64)bar_base_paddr, len);
goto l_ioremap_failed;
} else {
pci_set_drvdata(pdev, adapter);
}
LOG_INFO_BDF("bar_base_paddr = 0x%llx, bar len = %zu,\n"
"reg_base_addr = %p\n",
(u64)bar_base_paddr, len, adapter->hw.reg_base_addr);
return 0;
l_ioremap_failed:
pci_release_regions(pdev);
l_pci_request_mem_failed:
pci_disable_device(pdev);
l_pci_enable_device_mem_failed:
return ret;
}
static void sxevf_pci_exit(struct sxevf_adapter *adapter)
{
if (adapter->hw.reg_base_addr) {
iounmap(adapter->hw.reg_base_addr);
adapter->hw.reg_base_addr = NULL;
}
if (pci_is_enabled(adapter->pdev)) {
pci_release_regions(adapter->pdev);
pci_disable_device(adapter->pdev);
pci_set_drvdata(adapter->pdev, NULL);
}
}
static struct sxevf_adapter *sxevf_adapter_create(struct pci_dev *pdev)
{
struct net_device *netdev;
struct sxevf_adapter *adapter = NULL;
netdev = alloc_etherdev_mq(sizeof(struct sxevf_adapter),
SXEVF_TXRX_RING_NUM_MAX);
if (!netdev) {
LOG_ERROR("max:%d device[pci_id %u] sxe net device alloc failed\n",
SXEVF_TXRX_RING_NUM_MAX, pdev->dev.id);
goto l_netdev_alloc_failed;
}
adapter = netdev_priv(netdev);
adapter->pdev = pdev;
adapter->netdev = netdev;
adapter->msg_enable = netif_msg_init(-1, SXEVF_MSG_LEVEL_DEFAULT);
LOG_INFO_BDF("adapter:0x%pK netdev:0x%pK pdev:0x%pK\n", adapter, netdev,
pdev);
l_netdev_alloc_failed:
return adapter;
}
static inline u32 sxevf_readl(const void *reg)
{
return readl(reg);
}
static inline void sxevf_writel(u32 value, void *reg)
{
writel(value, reg);
}
static int sxevf_hw_base_init(struct sxevf_adapter *adapter)
{
int ret;
struct sxevf_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
hw->adapter = adapter;
adapter->mbx_version = SXEVF_MBX_API_10;
sxevf_hw_ops_init(hw);
sxevf_hw_reg_handle_init(hw, sxevf_readl, sxevf_writel);
sxevf_mbx_init(hw);
spin_lock_init(&adapter->mbx_lock);
ret = sxevf_dev_reset(hw);
if (ret) {
LOG_DEV_WARN("vf reset fail during probe.(err:%d)\n", ret);
} else {
sxevf_start_adapter(adapter);
sxevf_mbx_api_version_init(adapter);
ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr,
adapter->mac_filter_ctxt.def_uc_addr);
if (is_zero_ether_addr(adapter->mac_filter_ctxt.cur_uc_addr)) {
LOG_DEV_INFO("vf reset done, but pf don't assign\n"
"\tvalid mac addr for vf.\n");
}
#ifndef HAVE_ETH_HW_ADDR_SET_API
ether_addr_copy(netdev->dev_addr,
adapter->mac_filter_ctxt.cur_uc_addr);
#else
eth_hw_addr_set(netdev, adapter->mac_filter_ctxt.cur_uc_addr);
#endif
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr,
netdev->dev_addr);
ether_addr_copy(adapter->mac_filter_ctxt.def_uc_addr,
netdev->dev_addr);
LOG_DEV_INFO("vf use random mac addr:%pM.\n",
adapter->mac_filter_ctxt.def_uc_addr);
}
adapter->link.link_enable = true;
return 0;
}
static void sxevf_sw_base_init1(struct sxevf_adapter *adapter)
{
set_bit(SXEVF_DOWN, &adapter->state);
sxevf_ring_feature_init(adapter);
}
s32 sxevf_ring_irq_init(struct sxevf_adapter *adapter)
{
s32 ret;
sxevf_ring_num_set(adapter);
ret = sxevf_irq_ctxt_init(adapter);
if (ret)
LOG_ERROR_BDF("interrupt context init fail.(err:%d)\n", ret);
LOG_DEV_DEBUG("Multiqueue %s: Rx Queue count = %u,\n"
"\tTx Queue count = %u XDP Queue count %u\n",
(adapter->rx_ring_ctxt.num > 1) ? "Enabled" : "Disabled",
adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num,
adapter->xdp_ring_ctxt.num);
return ret;
}
void sxevf_ring_irq_exit(struct sxevf_adapter *adapter)
{
sxevf_irq_ctxt_exit(adapter);
}
void sxevf_save_reset_stats(struct sxevf_adapter *adapter)
{
struct sxevf_hw_stats *stats = &adapter->stats.hw;
if (stats->vfgprc || stats->vfgptc) {
stats->saved_reset_vfgprc += stats->vfgprc - stats->base_vfgprc;
stats->saved_reset_vfgptc += stats->vfgptc - stats->base_vfgptc;
stats->saved_reset_vfgorc += stats->vfgorc - stats->base_vfgorc;
stats->saved_reset_vfgotc += stats->vfgotc - stats->base_vfgotc;
stats->saved_reset_vfmprc += stats->vfmprc - stats->base_vfmprc;
}
}
void sxevf_last_counter_stats_init(struct sxevf_adapter *adapter)
{
struct sxevf_hw_stats *stats = &adapter->stats.hw;
struct sxevf_hw *hw = &adapter->hw;
hw->stat.ops->stats_init_value_get(hw, stats);
adapter->stats.hw.base_vfgprc = stats->last_vfgprc;
adapter->stats.hw.base_vfgorc = stats->last_vfgorc;
adapter->stats.hw.base_vfgptc = stats->last_vfgptc;
adapter->stats.hw.base_vfgotc = stats->last_vfgotc;
adapter->stats.hw.base_vfmprc = stats->last_vfmprc;
}
static void sxevf_sw_base_init2(struct sxevf_adapter *adapter)
{
sxevf_monitor_init(adapter);
#ifdef SXE_IPSEC_CONFIGURE
sxevf_ipsec_offload_init(adapter);
#endif
sxevf_last_counter_stats_init(adapter);
}
static int sxevf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
struct sxevf_adapter *adapter;
const char *device_name = dev_name(&pdev->dev);
adapter = sxevf_adapter_create(pdev);
if (!adapter) {
LOG_ERROR("adapter create failed.\n");
ret = -ENOMEM;
goto l_adapter_create_failed;
}
strlcpy(adapter->dev_name, device_name,
min_t(u32, strlen(device_name) + 1, DEV_NAME_LEN));
adapter->hw.board_type = id ? id->driver_data : SXE_BOARD_VF;
ret = sxevf_pci_init(adapter);
if (ret) {
LOG_ERROR_BDF("pci init failed.(ret:%d)\n", ret);
goto l_pci_init_failed;
}
ret = sxevf_config_dma_mask(adapter);
if (ret) {
LOG_ERROR_BDF("config dma mask failed.(ret:%d)\n", ret);
goto l_config_dma_mask_failed;
}
sxevf_netdev_init(adapter, pdev);
ret = sxevf_hw_base_init(adapter);
if (ret) {
LOG_ERROR_BDF("hardware base init failed.(ret:%d)\n", ret);
goto l_config_dma_mask_failed;
}
sxevf_sw_base_init1(adapter);
ret = sxevf_ring_irq_init(adapter);
if (ret) {
LOG_ERROR_BDF("interrupt ring assign scheme init failed,\n"
"\terr=%d\n",
ret);
goto l_config_dma_mask_failed;
}
sxevf_sw_base_init2(adapter);
strcpy(adapter->netdev->name, "eth%d");
ret = register_netdev(adapter->netdev);
if (ret) {
LOG_ERROR_BDF("register netdev failed.(ret:%d)\n", ret);
goto l_irq_init_failed;
}
set_bit(SXEVF_DOWN, &adapter->state);
netif_carrier_off(adapter->netdev);
LOG_DEV_INFO("%pM\n", adapter->netdev->dev_addr);
LOG_DEV_INFO("%s %s %s %s %s vf deviceId:0x%x mbx version:%u\n"
"\tprobe done.\n",
dev_driver_string(pdev->dev.parent),
dev_name(pdev->dev.parent), netdev_name(adapter->netdev),
dev_driver_string(&pdev->dev), dev_name(&pdev->dev),
pdev->device, adapter->mbx_version);
return 0;
l_irq_init_failed:
sxevf_ring_irq_exit(adapter);
l_config_dma_mask_failed:
sxevf_pci_exit(adapter);
l_pci_init_failed:
free_netdev(adapter->netdev);
l_adapter_create_failed:
return ret;
}
static void sxevf_func_exit(struct sxevf_adapter *adapter)
{
cancel_work_sync(&adapter->monitor_ctxt.work);
}
static void sxevf_remove(struct pci_dev *pdev)
{
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev;
if (!adapter)
goto l_end;
LOG_INFO_BDF("sxevf remove.\n");
set_bit(SXEVF_REMOVING, &adapter->state);
netdev = adapter->netdev;
sxevf_func_exit(adapter);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
#ifdef SXE_IPSEC_CONFIGURE
sxevf_ipsec_offload_exit(adapter);
#endif
sxevf_irq_ctxt_exit(adapter);
sxevf_pci_exit(adapter);
LOG_DEV_DEBUG("remove sxevf complete\n");
free_netdev(netdev);
LOG_INFO("%s %s %s %s deviceId:0x%x remove done.\n",
dev_driver_string(pdev->dev.parent),
dev_name(pdev->dev.parent), dev_driver_string(&pdev->dev),
dev_name(&pdev->dev), pdev->device);
l_end:
;
}
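/* PM entry point, also reused by sxevf_shutdown(): detach the netdev
 * and tear down ring irqs under rtnl, save PCI state when power
 * management is built in, then disable the PCI device.
 */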
static s32 sxevf_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
s32 ret = 0;
rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
sxevf_terminate(adapter);
sxevf_ring_irq_exit(adapter);
rtnl_unlock();
#ifdef CONFIG_PM
ret = pci_save_state(pdev);
if (ret) {
LOG_ERROR_BDF("save pci state fail.(err:%d)\n", ret);
return ret;
}
#endif
if (!test_and_set_bit(SXEVF_DISABLED, &adapter->state))
pci_disable_device(pdev);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static s32 sxevf_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
s32 ret;
pci_restore_state(pdev);
pci_save_state(pdev);
ret = pci_enable_device_mem(pdev);
if (ret) {
LOG_DEV_ERR("enable pci device from suspend fail.(err:%d)", ret);
goto l_end;
}
/* in order to force CPU ordering */
smp_mb__before_atomic();
clear_bit(SXEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
sxevf_reset(adapter);
rtnl_lock();
sxevf_ring_num_set(adapter);
ret = sxevf_irq_ctxt_init(adapter);
if (!ret && netif_running(netdev))
ret = sxevf_open(netdev);
rtnl_unlock();
if (ret) {
LOG_ERROR_BDF("pci device resume fail.(err:%d)\n", ret);
goto l_end;
}
netif_device_attach(netdev);
l_end:
return ret;
}
#endif
static void sxevf_shutdown(struct pci_dev *pdev)
{
sxevf_suspend(&pdev->dev);
}
static void sxevf_io_resume(struct pci_dev *pdev)
{
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
LOG_DEBUG_BDF("oops,vf pci dev[%p] got io resume\n", pdev);
rtnl_lock();
if (netif_running(netdev)) {
LOG_DEBUG_BDF("netdev running resume adapter.\n");
sxevf_open(netdev);
}
netif_device_attach(netdev);
rtnl_unlock();
LOG_INFO_BDF("vf pci dev[%p] io resume done.\n", pdev);
}
static pci_ers_result_t sxevf_io_slot_reset(struct pci_dev *pdev)
{
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t ret;
LOG_INFO_BDF("oops, vf pci dev[%p] got io slot reset\n", pdev);
if (pci_enable_device_mem(pdev)) {
LOG_DEV_ERR("cannot re-enable PCI device after reset.\n");
ret = PCI_ERS_RESULT_DISCONNECT;
goto l_out;
}
/* in order to force CPU ordering */
smp_mb__before_atomic();
clear_bit(SXEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
sxevf_reset(adapter);
ret = PCI_ERS_RESULT_RECOVERED;
l_out:
LOG_INFO_BDF("vf pci dev[%p] io slot reset done. ret=0x%x\n", pdev,
(u32)ret);
return ret;
}
static pci_ers_result_t sxevf_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct sxevf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
pci_ers_result_t ret;
LOG_DEBUG_BDF("oops,vf pci dev[%p] got io error detect, state=0x%x\n",
pdev, (u32)state);
if (!test_bit(SXEVF_MONITOR_WORK_INITED, &adapter->state)) {
LOG_ERROR_BDF("vf monitor not inited\n");
ret = PCI_ERS_RESULT_DISCONNECT;
goto l_out;
}
rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
sxevf_terminate(adapter);
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
ret = PCI_ERS_RESULT_DISCONNECT;
goto l_out;
}
if (!test_and_set_bit(SXEVF_DISABLED, &adapter->state)) {
LOG_DEBUG_BDF("vf set disabled\n");
pci_disable_device(pdev);
}
rtnl_unlock();
ret = PCI_ERS_RESULT_NEED_RESET;
l_out:
LOG_INFO_BDF("vf detected io error detected end, ret=0x%x.\n", ret);
return ret;
}
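/* Supported VF device IDs. driver_data carries the board type that
 * sxevf_probe() copies into hw.board_type.
 */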
static const struct pci_device_id sxevf_pci_tbl[] = {
{ PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
SXE_BOARD_VF },
{ PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC_HV, PCI_ANY_ID, PCI_ANY_ID, 0,
0, SXE_BOARD_VF_HV },
{
0,
}
};
static const struct pci_error_handlers sxevf_err_handler = {
.error_detected = sxevf_io_error_detected,
.slot_reset = sxevf_io_slot_reset,
.resume = sxevf_io_resume,
};
static SIMPLE_DEV_PM_OPS(sxevf_pm_ops, sxevf_suspend, sxevf_resume);
static struct pci_driver sxevf_pci_driver = {
.name = SXEVF_DRV_NAME,
.id_table = sxevf_pci_tbl,
.probe = sxevf_probe,
.remove = sxevf_remove,
.driver.pm = &sxevf_pm_ops,
.shutdown = sxevf_shutdown,
.err_handler = &sxevf_err_handler,
};
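/* Module bring-up order: the debug log facility (non-release builds
 * only), then the driver workqueue used by the monitor task, then PCI
 * driver registration. sxevf_exit() mirrors this in reverse.
 */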
static int __init sxevf_init(void)
{
int ret;
LOG_PRVF_INFO("version[%s], commit_id[%s],\n"
"\tbranch[%s], build_time[%s]\n",
SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
#ifndef SXE_DRIVER_RELEASE
ret = sxe_log_init(true);
if (ret < 0) {
LOG_PRVF_ERR("sxe log init fail.(err:%d)\n", ret);
goto l_end;
}
#endif
sxevf_wq = create_singlethread_workqueue(SXEVF_DRV_NAME);
if (!sxevf_wq) {
LOG_PRVF_ERR("failed to create workqueue\n");
ret = -ENOMEM;
goto l_log_exit;
}
ret = pci_register_driver(&sxevf_pci_driver);
if (ret) {
LOG_ERROR("%s driver register fail.(err:%d)\n",
sxevf_pci_driver.name, ret);
goto l_pci_register_driver_failed;
}
LOG_INFO("pci driver:%s init done.\n", sxevf_pci_driver.name);
return 0;
l_pci_register_driver_failed:
destroy_workqueue(sxevf_wq);
sxevf_wq = NULL;
l_log_exit:
#ifndef SXE_DRIVER_RELEASE
sxe_log_exit();
l_end:
#endif
return ret;
}
struct workqueue_struct *sxevf_wq_get(void)
{
return sxevf_wq;
}
static void __exit sxevf_exit(void)
{
pci_unregister_driver(&sxevf_pci_driver);
if (sxevf_wq) {
destroy_workqueue(sxevf_wq);
sxevf_wq = NULL;
}
LOG_INFO("pci driver:%s exit done.\n", sxevf_pci_driver.name);
#ifndef SXE_DRIVER_RELEASE
sxe_log_exit();
#endif
}
MODULE_DEVICE_TABLE(pci, sxevf_pci_tbl);
MODULE_INFO(build_time, SXE_BUILD_TIME);
MODULE_INFO(branch, SXE_BRANCH);
MODULE_INFO(commit_id, SXE_COMMIT_ID);
MODULE_DESCRIPTION(SXEVF_DRV_DESCRIPTION);
MODULE_AUTHOR(SXEVF_DRV_AUTHOR);
MODULE_VERSION(SXE_VERSION);
MODULE_LICENSE(SXE_DRV_LICENSE);
module_init(sxevf_init);
module_exit(sxevf_exit);

View File

@ -0,0 +1,404 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_monitor.c
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#include <net/rtnetlink.h>
#include "sxevf.h"
#include "sxevf_netdev.h"
#include "sxevf_monitor.h"
#include "sxevf_ethtool.h"
#include "sxevf_msg.h"
#define SXEVF_CHECK_LINK_TIMER_PERIOD (HZ / 10)
#define SXEVF_NORMAL_TIMER_PERIOD (HZ * 2)
#define SXEVF_CHECK_LINK_CYCLE_CNT (5)
#define SXEVF_CHECK_LINK_DELAY_TIME (100)
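/* The monitor timer polls every 2 seconds in steady state and drops to
 * HZ/10 (100 ms) while a link check is pending; a link-up indication is
 * additionally debounced by re-reading the link register
 * SXEVF_CHECK_LINK_CYCLE_CNT times with SXEVF_CHECK_LINK_DELAY_TIME
 * (presumably microseconds, via SXEVF_UDELAY) between reads.
 */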
void sxevf_task_timer_trigger(struct sxevf_adapter *adapter)
{
set_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state);
LOG_DEBUG_BDF("link check requester,\n"
"\tstate=%lx, monitor_state=%lx, is_up=%d\n",
adapter->state, adapter->monitor_ctxt.state,
adapter->link.is_up);
mod_timer(&adapter->monitor_ctxt.timer, jiffies);
}
void sxevf_monitor_work_schedule(struct sxevf_adapter *adapter)
{
struct workqueue_struct *wq = sxevf_wq_get();
if (!test_bit(SXEVF_DOWN, &adapter->state) &&
!test_bit(SXEVF_REMOVING, &adapter->state) &&
!test_and_set_bit(SXEVF_MONITOR_WORK_SCHED,
&adapter->monitor_ctxt.state))
queue_work(wq, &adapter->monitor_ctxt.work);
}
static void sxevf_timer_cb(struct timer_list *timer)
{
struct sxevf_monitor_context *monitor =
container_of(timer, struct sxevf_monitor_context, timer);
struct sxevf_adapter *adapter =
container_of(monitor, struct sxevf_adapter, monitor_ctxt);
unsigned long period;
if (test_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state))
period = SXEVF_CHECK_LINK_TIMER_PERIOD;
else
period = SXEVF_NORMAL_TIMER_PERIOD;
mod_timer(&adapter->monitor_ctxt.timer, period + jiffies);
sxevf_monitor_work_schedule(adapter);
}
static void sxevf_monitor_work_complete(struct sxevf_adapter *adapter)
{
BUG_ON(!test_bit(SXEVF_MONITOR_WORK_SCHED,
&adapter->monitor_ctxt.state));
/* in order to force CPU ordering */
smp_mb__before_atomic();
clear_bit(SXEVF_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state);
}
static s32 sxevf_ctrl_msg_check(struct sxevf_adapter *adapter)
{
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_ctrl_msg ctrl_msg;
s32 ret;
spin_lock_bh(&adapter->mbx_lock);
ret = sxevf_ctrl_msg_rcv_and_clear(hw,
(u32 *)&ctrl_msg,
SXEVF_MSG_NUM(sizeof(struct sxevf_ctrl_msg)));
spin_unlock_bh(&adapter->mbx_lock);
if (ret) {
LOG_ERROR_BDF("ctrl msg rcv fail due to lock fail.(err:%d)\n",
ret);
goto l_end;
}
if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_REINIT) {
adapter->link.need_reinit = true;
clear_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state);
LOG_WARN_BDF("rcv ctrl msg:0x%x need reinit vf.\n",
ctrl_msg.msg_type);
} else if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_NETDEV_DOWN) {
adapter->link.is_up = false;
set_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state);
LOG_WARN_BDF("rcv ctrl msg:0x%x need link down.\n",
ctrl_msg.msg_type);
} else if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_LINK_UPDATE) {
adapter->link.is_up = true;
LOG_WARN_BDF("rcv ctrl msg:0x%x physical link up.\n",
ctrl_msg.msg_type);
}
l_end:
return ret;
}
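/* Read the VF link status register and treat the link as up only after
 * it stays up across the debounce re-reads. A pending mailbox control
 * message is drained first; its payload is discarded at this point.
 */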
static void sxevf_physical_link_check(struct sxevf_adapter *adapter)
{
u32 link_reg, i;
u32 msg;
struct sxevf_hw *hw = &adapter->hw;
spin_lock_bh(&adapter->mbx_lock);
sxevf_ctrl_msg_rcv(hw, &msg, 1);
spin_unlock_bh(&adapter->mbx_lock);
link_reg = hw->setup.ops->link_state_get(hw);
if (!(link_reg & SXE_VFLINKS_UP)) {
adapter->link.is_up = false;
goto l_end;
}
for (i = 0; i < SXEVF_CHECK_LINK_CYCLE_CNT; i++) {
SXEVF_UDELAY(SXEVF_CHECK_LINK_DELAY_TIME);
link_reg = hw->setup.ops->link_state_get(hw);
if (!(link_reg & SXE_VFLINKS_UP)) {
adapter->link.is_up = false;
goto l_end;
}
}
switch (link_reg & SXE_VFLINKS_SPEED) {
case SXE_VFLINKS_SPEED_10G:
adapter->link.speed = SXEVF_LINK_SPEED_10GB_FULL;
break;
case SXE_VFLINKS_SPEED_1G:
adapter->link.speed = SXEVF_LINK_SPEED_1GB_FULL;
break;
case SXE_VFLINKS_SPEED_100:
adapter->link.speed = SXEVF_LINK_SPEED_100_FULL;
break;
}
adapter->link.is_up = true;
l_end:
LOG_INFO_BDF("link up status:%d.\n", adapter->link.is_up);
}
static void sxevf_link_up_handle(struct sxevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (netif_carrier_ok(netdev))
goto l_end;
LOG_DEV_INFO("NIC %s %s link state, down to up, speed=%s\n",
netdev_name(adapter->netdev),
dev_name(&adapter->pdev->dev),
(adapter->link.speed == SXEVF_LINK_SPEED_10GB_FULL) ?
"10 Gbps" :
(adapter->link.speed == SXEVF_LINK_SPEED_1GB_FULL) ?
"1 Gbps" :
(adapter->link.speed == SXEVF_LINK_SPEED_100_FULL) ?
"100 Mbps" :
"unknown speed");
netif_carrier_on(netdev);
l_end:
;
}
static void sxevf_link_down_handle(struct sxevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
adapter->link.speed = 0;
if (netif_carrier_ok(netdev)) {
LOG_DEV_INFO("NIC %s %s link state, up to down\n",
netdev_name(adapter->netdev),
dev_name(&adapter->pdev->dev));
netif_carrier_off(netdev);
}
}
static void sxevf_detect_link_work(struct sxevf_adapter *adapter)
{
struct sxevf_hw *hw = &adapter->hw;
s32 ret;
if (test_bit(SXEVF_DOWN, &adapter->state) ||
test_bit(SXEVF_RESETTING, &adapter->state))
goto l_end;
if (!sxevf_pf_rst_check(hw) || !hw->mbx.retry ||
(test_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state))) {
LOG_WARN_BDF("checked pf reset not done or someone timeout:%d\n"
"\tmonitor state:0x%lx.\n",
hw->mbx.retry, adapter->monitor_ctxt.state);
set_bit(SXEVF_LINK_CHECK_REQUESTED,
&adapter->monitor_ctxt.state);
}
if (!test_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state))
goto l_end;
sxevf_physical_link_check(adapter);
if (adapter->link.is_up) {
ret = sxevf_ctrl_msg_check(adapter);
if (ret) {
LOG_ERROR_BDF("ctrl msg rcv fail, try to next workqueue.\n");
goto l_end;
}
if (adapter->link.need_reinit || !hw->mbx.retry) {
adapter->link.need_reinit = false;
adapter->link.is_up = false;
set_bit(SXEVF_RESET_REQUESTED, &adapter->monitor_ctxt.state);
}
}
if (adapter->link.is_up) {
clear_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state);
if (adapter->link.link_enable)
sxevf_link_up_handle(adapter);
} else {
sxevf_link_down_handle(adapter);
}
l_end:
;
}
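/* Reinitialize the hardware under rtnl when a reset was requested by
 * the link-check path; skipped while the adapter is down, being
 * removed, or already resetting.
 */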
static void sxevf_reset_work(struct sxevf_adapter *adapter)
{
if (!test_and_clear_bit(SXEVF_RESET_REQUESTED,
&adapter->monitor_ctxt.state))
goto l_end;
rtnl_lock();
if (test_bit(SXEVF_DOWN, &adapter->state) ||
test_bit(SXEVF_REMOVING, &adapter->state) ||
test_bit(SXEVF_RESETTING, &adapter->state)) {
rtnl_unlock();
goto l_end;
}
LOG_ERROR_BDF("reset adapter\n");
adapter->stats.sw.tx_timeout_count++;
sxevf_hw_reinit(adapter);
if (adapter->link.mac_change) {
adapter->link.mac_change = false;
call_netdevice_notifiers(NETDEV_CHANGEADDR, adapter->netdev);
}
rtnl_unlock();
l_end:
;
}
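/* Arm the tx hang detector on every active tx/xdp ring while the
 * carrier is on, then fire the interrupt vectors that have rings
 * attached so any stalled queue gets serviced.
 */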
static void sxevf_check_hang_work(struct sxevf_adapter *adapter)
{
u32 i;
u64 eics = 0;
struct sxevf_irq_data *irq_priv;
struct sxevf_hw *hw = &adapter->hw;
struct sxevf_ring **tx_ring = adapter->tx_ring_ctxt.ring;
struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring;
if (test_bit(SXEVF_DOWN, &adapter->state) ||
test_bit(SXEVF_RESETTING, &adapter->state))
goto l_end;
if (netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->tx_ring_ctxt.num; i++)
SXEVF_TX_HANG_PROC_ACTIVE(tx_ring[i]);
for (i = 0; i < adapter->xdp_ring_ctxt.num; i++)
SXEVF_TX_HANG_PROC_ACTIVE(xdp_ring[i]);
}
for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) {
irq_priv = adapter->irq_ctxt.irq_data[i];
if (irq_priv->tx.list.next || irq_priv->rx.list.next)
eics |= BIT(i);
}
hw->irq.ops->ring_irq_trigger(hw, eics);
LOG_INFO_BDF("set check hang flag ok eics:0x%llx\n", eics);
l_end:
;
}
static void sxevf_stats_update_work(struct sxevf_adapter *adapter)
{
if (test_bit(SXEVF_DOWN, &adapter->state) ||
test_bit(SXEVF_RESETTING, &adapter->state))
return;
sxevf_update_stats(adapter);
}
static s32 sxevf_hw_fault_handle_task(struct sxevf_adapter *adapter)
{
s32 ret = 0;
if (sxevf_is_hw_fault(&adapter->hw)) {
if (!test_bit(SXEVF_DOWN, &adapter->state)) {
rtnl_lock();
sxevf_down(adapter);
rtnl_unlock();
}
LOG_ERROR_BDF("sxe nic fault\n");
ret = -EFAULT;
}
return ret;
}
static void sxevf_ring_reassign_work(struct sxevf_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
if (!test_and_clear_bit(SXEVF_RING_REASSIGN_REQUESTED,
&adapter->monitor_ctxt.state))
goto l_end;
if (test_bit(SXEVF_DOWN, &adapter->state) ||
test_bit(SXEVF_RESETTING, &adapter->state))
goto l_end;
rtnl_lock();
if (netif_running(dev))
sxevf_close(dev);
sxevf_ring_irq_exit(adapter);
sxevf_ring_irq_init(adapter);
if (netif_running(dev))
sxevf_open(dev);
rtnl_unlock();
l_end:
;
}
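/* Monitor work pipeline: handle a hardware fault first (which downs the
 * adapter and short-circuits the rest), then ring reassignment, reset,
 * link detection, statistics refresh and tx hang checking.
 */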
static void sxevf_work_cb(struct work_struct *work)
{
struct sxevf_monitor_context *monitor =
container_of(work, struct sxevf_monitor_context, work);
struct sxevf_adapter *adapter =
container_of(monitor, struct sxevf_adapter, monitor_ctxt);
if (sxevf_hw_fault_handle_task(adapter))
goto l_end;
sxevf_ring_reassign_work(adapter);
sxevf_reset_work(adapter);
sxevf_detect_link_work(adapter);
sxevf_stats_update_work(adapter);
sxevf_check_hang_work(adapter);
l_end:
sxevf_monitor_work_complete(adapter);
}
static void sxevf_hw_fault_task_trigger(void *priv)
{
struct sxevf_adapter *adapter = (struct sxevf_adapter *)priv;
if (test_bit(SXEVF_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state)) {
sxevf_monitor_work_schedule(adapter);
LOG_ERROR_BDF("sxe vf nic fault, submit monitor task and\n"
"\tperform the down operation\n");
}
}
void sxevf_monitor_init(struct sxevf_adapter *adapter)
{
struct sxevf_hw *hw = &adapter->hw;
timer_setup(&adapter->monitor_ctxt.timer, sxevf_timer_cb, 0);
INIT_WORK(&adapter->monitor_ctxt.work, sxevf_work_cb);
set_bit(SXEVF_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state);
clear_bit(SXEVF_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state);
sxevf_hw_fault_handle_init(hw, sxevf_hw_fault_task_trigger, adapter);
}

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C), 2020, Linkdata Technologies Co., Ltd.
*
* @file: sxevf_monitor.h
* @author: Linkdata
* @date: 2025.02.16
* @brief:
* @note:
*/
#ifndef __SXEVF_MONITOR_H__
#define __SXEVF_MONITOR_H__
struct sxevf_adapter;
enum sxevf_monitor_task_state {
SXEVF_MONITOR_WORK_INITED,
SXEVF_MONITOR_WORK_SCHED,
SXEVF_RESET_REQUESTED,
SXEVF_LINK_CHECK_REQUESTED,
SXEVF_RING_REASSIGN_REQUESTED,
SXEVF_NETDEV_DOWN,
};
struct sxevf_monitor_context {
struct timer_list timer;
struct work_struct work;
unsigned long state;
};
struct sxevf_link_info {
u8 is_up : 1;
u8 need_reinit : 1;
u8 link_enable : 1;
u8 mac_change : 1;
u8 reserved : 4;
u32 speed;
unsigned long check_timeout;
};
void sxevf_task_timer_trigger(struct sxevf_adapter *adapter);
void sxevf_monitor_init(struct sxevf_adapter *adapter);
void sxevf_monitor_work_schedule(struct sxevf_adapter *adapter);
#endif

Some files were not shown because too many files have changed in this diff.