Remove redundant dpdk files.

This commit is contained in:
jfb8856606 2025-01-10 12:58:55 +00:00
parent 15eb0f40bf
commit 29c7d58350
310 changed files with 0 additions and 43195 deletions

@@ -1,26 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Microsoft Corporation
"""This script checks if the system supports huge pages"""
import platform
import ctypes
os_name = platform.system()
if os_name == "Linux":
try:
with open("/proc/sys/vm/nr_hugepages") as file_o:
content = file_o.read()
print(content)
except:
print("0")
elif os_name == "FreeBSD":
# Assume FreeBSD always has hugepages enabled
print("1")
elif os_name == "Windows":
if ctypes.windll.kernel32.GetLargePageMinimum() > 0:
print("1")
else:
print("0")
else:
print("0")

@@ -1,895 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#include <string.h>
#include <errno.h>
#include "test.h"
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_flow_classify(void)
{
printf("flow_classify not supported on Windows, skipping test\n");
return TEST_SKIPPED;
}
#else
#include <rte_acl.h>
#include <rte_common.h>
#include <rte_table_acl.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include "packet_burst_generator.h"
#include "test_flow_classify.h"
#define FLOW_CLASSIFY_MAX_RULE_NUM 100
#define MAX_PKT_BURST 32
#define NB_SOCKETS 4
#define MEMPOOL_CACHE_SIZE 256
#define MBUF_SIZE 512
#define NB_MBUF 512
/* test UDP, TCP and SCTP packets */
static struct rte_mempool *mbufpool[NB_SOCKETS];
static struct rte_mbuf *bufs[MAX_PKT_BURST];
static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
/* first input field - always one byte long. */
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
/* rte_flow uses a bit mask for IPv4 addresses */
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint32_t),
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
/* rte_flow uses a bit mask for IPv4 addresses */
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint32_t),
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
* They share the same input index.
*/
{
/* rte_flow uses a bit mask for protocol ports */
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
offsetof(struct rte_tcp_hdr, src_port),
},
{
/* rte_flow uses a bit mask for protocol ports */
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
offsetof(struct rte_tcp_hdr, dst_port),
},
};
/* parameters for rte_flow_classify_validate and rte_flow_classify_create */
/* test UDP pattern:
* "eth / ipv4 src spec 2.2.2.3 src mask 255.255.255.00 dst spec 2.2.2.7
* dst mask 255.255.255.00 / udp src is 32 dst is 33 / end"
*/
static struct rte_flow_item_ipv4 ipv4_udp_spec_1 = {
{ { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_UDP, 0,
RTE_IPV4(2, 2, 2, 3), RTE_IPV4(2, 2, 2, 7)}
};
static const struct rte_flow_item_ipv4 ipv4_mask_24 = {
.hdr = {
.next_proto_id = 0xff,
.src_addr = 0xffffff00,
.dst_addr = 0xffffff00,
},
};
static struct rte_flow_item_udp udp_spec_1 = {
{ 32, 33, 0, 0 }
};
static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
0, 0, 0 };
static struct rte_flow_item eth_item_bad = { -1, 0, 0, 0 };
static struct rte_flow_item ipv4_udp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
&ipv4_udp_spec_1, 0, &ipv4_mask_24};
static struct rte_flow_item ipv4_udp_item_bad = { RTE_FLOW_ITEM_TYPE_IPV4,
NULL, 0, NULL};
static struct rte_flow_item udp_item_1 = { RTE_FLOW_ITEM_TYPE_UDP,
&udp_spec_1, 0, &rte_flow_item_udp_mask};
static struct rte_flow_item udp_item_bad = { RTE_FLOW_ITEM_TYPE_UDP,
NULL, 0, NULL};
static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
0, 0, 0 };
/* test TCP pattern:
* "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8
* dst mask 255.255.255.00 / tcp src is 16 dst is 17 / end"
*/
static struct rte_flow_item_ipv4 ipv4_tcp_spec_1 = {
{ { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_TCP, 0,
RTE_IPV4(1, 2, 3, 4), RTE_IPV4(5, 6, 7, 8)}
};
static struct rte_flow_item_tcp tcp_spec_1 = {
{ 16, 17, 0, 0, 0, 0, 0, 0, 0}
};
static struct rte_flow_item ipv4_tcp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
&ipv4_tcp_spec_1, 0, &ipv4_mask_24};
static struct rte_flow_item tcp_item_1 = { RTE_FLOW_ITEM_TYPE_TCP,
&tcp_spec_1, 0, &rte_flow_item_tcp_mask};
/* test SCTP pattern:
* "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8
* dst mask 255.255.255.00 / sctp src is 16 dst is 17/ end"
*/
static struct rte_flow_item_ipv4 ipv4_sctp_spec_1 = {
{ { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_SCTP, 0,
RTE_IPV4(11, 12, 13, 14), RTE_IPV4(15, 16, 17, 18)}
};
static struct rte_flow_item_sctp sctp_spec_1 = {
{ 10, 11, 0, 0}
};
static struct rte_flow_item ipv4_sctp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
&ipv4_sctp_spec_1, 0, &ipv4_mask_24};
static struct rte_flow_item sctp_item_1 = { RTE_FLOW_ITEM_TYPE_SCTP,
&sctp_spec_1, 0, &rte_flow_item_sctp_mask};
/* test actions:
* "actions count / end"
*/
static struct rte_flow_query_count count = {
.reset = 1,
.hits_set = 1,
.bytes_set = 1,
.hits = 0,
.bytes = 0,
};
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
&count};
static struct rte_flow_action count_action_bad = { -1, 0};
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
static struct rte_flow_action actions[2];
/* test attributes */
static struct rte_flow_attr attr;
/* test error */
static struct rte_flow_error error;
/* test pattern */
static struct rte_flow_item pattern[4];
/* flow classify data for UDP burst */
static struct rte_flow_classify_ipv4_5tuple_stats udp_ntuple_stats;
static struct rte_flow_classify_stats udp_classify_stats = {
.stats = (void *)&udp_ntuple_stats
};
/* flow classify data for TCP burst */
static struct rte_flow_classify_ipv4_5tuple_stats tcp_ntuple_stats;
static struct rte_flow_classify_stats tcp_classify_stats = {
.stats = (void *)&tcp_ntuple_stats
};
/* flow classify data for SCTP burst */
static struct rte_flow_classify_ipv4_5tuple_stats sctp_ntuple_stats;
static struct rte_flow_classify_stats sctp_classify_stats = {
.stats = (void *)&sctp_ntuple_stats
};
struct flow_classifier_acl *cls;
struct flow_classifier_acl {
struct rte_flow_classifier *cls;
} __rte_cache_aligned;
/*
* test functions by passing invalid or
* non-workable parameters.
*/
static int
test_invalid_parameters(void)
{
struct rte_flow_classify_rule *rule;
int ret;
ret = rte_flow_classify_validate(NULL, NULL, NULL, NULL, NULL);
if (!ret) {
printf("Line %i: rte_flow_classify_validate",
__LINE__);
printf(" with NULL param should have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(NULL, NULL, NULL, NULL,
NULL, NULL);
if (rule) {
printf("Line %i: flow_classifier_table_entry_add", __LINE__);
printf(" with NULL param should have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(NULL, NULL);
if (!ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" with NULL param should have failed!\n");
return -1;
}
ret = rte_flow_classifier_query(NULL, NULL, 0, NULL, NULL);
if (!ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" with NULL param should have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(NULL, NULL, NULL, NULL,
NULL, &error);
if (rule) {
printf("Line %i: flow_classify_table_entry_add ", __LINE__);
printf("with NULL param should have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(NULL, NULL);
if (!ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf("with NULL param should have failed!\n");
return -1;
}
ret = rte_flow_classifier_query(NULL, NULL, 0, NULL, NULL);
if (!ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" with NULL param should have failed!\n");
return -1;
}
return 0;
}
static int
test_valid_parameters(void)
{
struct rte_flow_classify_rule *rule;
int ret;
int key_found;
/*
* set up parameters for rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item;
pattern[1] = ipv4_udp_item_1;
pattern[2] = udp_item_1;
pattern[3] = end_item;
actions[0] = count_action;
actions[1] = end_action;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (ret) {
printf("Line %i: rte_flow_classify_validate",
__LINE__);
printf(" should not have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (!rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should not have failed!\n");
return -1;
}
return 0;
}
static int
test_invalid_patterns(void)
{
struct rte_flow_classify_rule *rule;
int ret;
int key_found;
/*
* set up parameters for rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item_bad;
pattern[1] = ipv4_udp_item_1;
pattern[2] = udp_item_1;
pattern[3] = end_item;
actions[0] = count_action;
actions[1] = end_action;
pattern[0] = eth_item;
pattern[1] = ipv4_udp_item_bad;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (!ret) {
printf("Line %i: rte_flow_classify_validate", __LINE__);
printf(" should have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (!ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should have failed!\n");
return -1;
}
pattern[1] = ipv4_udp_item_1;
pattern[2] = udp_item_bad;
pattern[3] = end_item;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (!ret) {
printf("Line %i: rte_flow_classify_validate", __LINE__);
printf(" should have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (!ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should have failed!\n");
return -1;
}
return 0;
}
static int
test_invalid_actions(void)
{
struct rte_flow_classify_rule *rule;
int ret;
int key_found;
/*
* set up parameters for rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item;
pattern[1] = ipv4_udp_item_1;
pattern[2] = udp_item_1;
pattern[3] = end_item;
actions[0] = count_action_bad;
actions[1] = end_action;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (!ret) {
printf("Line %i: rte_flow_classify_validate", __LINE__);
printf(" should have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (!ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should have failed!\n");
return -1;
}
return 0;
}
static int
init_ipv4_udp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
struct rte_udp_hdr pkt_udp_hdr;
uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
uint16_t src_port = 32;
uint16_t dst_port = 33;
uint16_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
printf("Set up IPv4 UDP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
pktlen = initialize_ipv4_header(&pkt_ipv4_hdr, src_addr, dst_addr,
pktlen);
printf("ETH + IPv4 pktlen %u\n", pktlen);
pktlen = initialize_udp_header(&pkt_udp_hdr, src_port, dst_port,
pktlen);
printf("ETH + IPv4 + UDP pktlen %u\n\n", pktlen);
return generate_packet_burst(mp, pkts_burst, &pkt_eth_hdr,
0, &pkt_ipv4_hdr, 1,
&pkt_udp_hdr, burst_size,
PACKET_BURST_GEN_PKT_LEN, 1);
}
static int
init_ipv4_tcp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
struct rte_tcp_hdr pkt_tcp_hdr;
uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
uint16_t src_port = 16;
uint16_t dst_port = 17;
uint16_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
printf("Set up IPv4 TCP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
dst_addr, pktlen, IPPROTO_TCP);
printf("ETH + IPv4 pktlen %u\n", pktlen);
pktlen = initialize_tcp_header(&pkt_tcp_hdr, src_port, dst_port,
pktlen);
printf("ETH + IPv4 + TCP pktlen %u\n\n", pktlen);
return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
0, &pkt_ipv4_hdr, 1, IPPROTO_TCP,
&pkt_tcp_hdr, burst_size,
PACKET_BURST_GEN_PKT_LEN, 1);
}
static int
init_ipv4_sctp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
struct rte_sctp_hdr pkt_sctp_hdr;
uint32_t src_addr = IPV4_ADDR(11, 12, 13, 14);
uint32_t dst_addr = IPV4_ADDR(15, 16, 17, 18);
uint16_t src_port = 10;
uint16_t dst_port = 11;
uint16_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
printf("Set up IPv4 SCTP traffic\n");
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
printf("ETH pktlen %u\n", pktlen);
pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
dst_addr, pktlen, IPPROTO_SCTP);
printf("ETH + IPv4 pktlen %u\n", pktlen);
pktlen = initialize_sctp_header(&pkt_sctp_hdr, src_port, dst_port,
pktlen);
printf("ETH + IPv4 + SCTP pktlen %u\n\n", pktlen);
return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
0, &pkt_ipv4_hdr, 1, IPPROTO_SCTP,
&pkt_sctp_hdr, burst_size,
PACKET_BURST_GEN_PKT_LEN, 1);
}
static int
init_mbufpool(void)
{
int socketid;
int ret = 0;
unsigned int lcore_id;
char s[64];
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
socketid = rte_lcore_to_socket_id(lcore_id);
if (socketid >= NB_SOCKETS) {
printf(
"Socket %d of lcore %u is out of range %d\n",
socketid, lcore_id, NB_SOCKETS);
ret = -1;
break;
}
if (mbufpool[socketid] == NULL) {
snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
mbufpool[socketid] =
rte_pktmbuf_pool_create(s, NB_MBUF,
MEMPOOL_CACHE_SIZE, 0, MBUF_SIZE,
socketid);
if (mbufpool[socketid]) {
printf("Allocated mbuf pool on socket %d\n",
socketid);
} else {
printf("Cannot init mbuf pool on socket %d\n",
socketid);
ret = -ENOMEM;
break;
}
}
}
return ret;
}
static int
test_query_udp(void)
{
struct rte_flow_error error;
struct rte_flow_classify_rule *rule;
int ret;
int i;
int key_found;
ret = init_ipv4_udp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
if (ret != MAX_PKT_BURST) {
printf("Line %i: init_udp_ipv4_traffic has failed!\n",
__LINE__);
return -1;
}
for (i = 0; i < MAX_PKT_BURST; i++)
bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
/*
* set up parameters for rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item;
pattern[1] = ipv4_udp_item_1;
pattern[2] = udp_item_1;
pattern[3] = end_item;
actions[0] = count_action;
actions[1] = end_action;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (ret) {
printf("Line %i: rte_flow_classify_validate", __LINE__);
printf(" should not have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (!rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
rule, &udp_classify_stats);
if (ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should not have failed!\n");
return -1;
}
return 0;
}
static int
test_query_tcp(void)
{
struct rte_flow_classify_rule *rule;
int ret;
int i;
int key_found;
ret = init_ipv4_tcp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
if (ret != MAX_PKT_BURST) {
printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
__LINE__);
return -1;
}
for (i = 0; i < MAX_PKT_BURST; i++)
bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
/*
* set up parameters for rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item;
pattern[1] = ipv4_tcp_item_1;
pattern[2] = tcp_item_1;
pattern[3] = end_item;
actions[0] = count_action;
actions[1] = end_action;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" should not have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (!rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
rule, &tcp_classify_stats);
if (ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should not have failed!\n");
return -1;
}
return 0;
}
static int
test_query_sctp(void)
{
struct rte_flow_classify_rule *rule;
int ret;
int i;
int key_found;
ret = init_ipv4_sctp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
if (ret != MAX_PKT_BURST) {
printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
__LINE__);
return -1;
}
for (i = 0; i < MAX_PKT_BURST; i++)
bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
/*
* set up parameters rte_flow_classify_validate,
* rte_flow_classify_table_entry_add and
* rte_flow_classify_table_entry_delete
*/
attr.ingress = 1;
attr.priority = 1;
pattern[0] = eth_item;
pattern[1] = ipv4_sctp_item_1;
pattern[2] = sctp_item_1;
pattern[3] = end_item;
actions[0] = count_action;
actions[1] = end_action;
ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
actions, &error);
if (ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" should not have failed!\n");
return -1;
}
rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
actions, &key_found, &error);
if (!rule) {
printf("Line %i: flow_classify_table_entry_add", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
rule, &sctp_classify_stats);
if (ret) {
printf("Line %i: flow_classifier_query", __LINE__);
printf(" should not have failed!\n");
return -1;
}
ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
if (ret) {
printf("Line %i: rte_flow_classify_table_entry_delete",
__LINE__);
printf(" should not have failed!\n");
return -1;
}
return 0;
}
static int
test_flow_classify(void)
{
struct rte_table_acl_params table_acl_params;
struct rte_flow_classify_table_params cls_table_params;
struct rte_flow_classifier_params cls_params;
int ret;
uint32_t size;
/* Memory allocation */
size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
cls = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
if (cls == NULL) {
printf("Line %i: flow classifier memory allocation has failed!\n",
__LINE__);
return TEST_FAILED;
}
cls_params.name = "flow_classifier";
cls_params.socket_id = 0;
cls->cls = rte_flow_classifier_create(&cls_params);
if (cls->cls == NULL) {
printf("Line %i: flow classifier create has failed!\n",
__LINE__);
rte_free(cls);
return TEST_FAILED;
}
/* initialise ACL table params */
table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
table_acl_params.name = "table_acl_ipv4_5tuple";
table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
/* initialise table create params */
cls_table_params.ops = &rte_table_acl_ops;
cls_table_params.arg_create = &table_acl_params;
cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
ret = rte_flow_classify_table_create(cls->cls, &cls_table_params);
if (ret) {
printf("Line %i: f_create has failed!\n", __LINE__);
rte_flow_classifier_free(cls->cls);
rte_free(cls);
return TEST_FAILED;
}
printf("Created table_acl for for IPv4 five tuple packets\n");
ret = init_mbufpool();
if (ret) {
printf("Line %i: init_mbufpool has failed!\n", __LINE__);
return TEST_FAILED;
}
if (test_invalid_parameters() < 0)
return TEST_FAILED;
if (test_valid_parameters() < 0)
return TEST_FAILED;
if (test_invalid_patterns() < 0)
return TEST_FAILED;
if (test_invalid_actions() < 0)
return TEST_FAILED;
if (test_query_udp() < 0)
return TEST_FAILED;
if (test_query_tcp() < 0)
return TEST_FAILED;
if (test_query_sctp() < 0)
return TEST_FAILED;
return TEST_SUCCESS;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
REGISTER_TEST_COMMAND(flow_classify_autotest, test_flow_classify);

@@ -1,26 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#ifndef TEST_FLOW_CLASSIFY_H_
#define TEST_FLOW_CLASSIFY_H_
/* ACL field definitions for IPv4 5 tuple rule */
enum {
PROTO_FIELD_IPV4,
SRC_FIELD_IPV4,
DST_FIELD_IPV4,
SRCP_FIELD_IPV4,
DSTP_FIELD_IPV4,
NUM_FIELDS_IPV4
};
enum {
PROTO_INPUT_IPV4,
SRC_INPUT_IPV4,
DST_INPUT_IPV4,
SRCP_DESTP_INPUT_IPV4
};
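/*
 * Each *_FIELD_IPV4 value above is used as a field_index and each
 * *_INPUT_IPV4 value as an input_index in the ipv4_defs ACL field
 * definitions of the test. The source and destination port fields
 * deliberately share SRCP_DESTP_INPUT_IPV4 so that together they form
 * one 4-byte ACL input word.
 */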
#endif /* TEST_FLOW_CLASSIFY_H_ */

@@ -1,740 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#include "test.h"
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#if !defined(RTE_EXEC_ENV_LINUX) || !defined(RTE_LIB_KNI)
static int
test_kni(void)
{
printf("KNI not supported, skipping test\n");
return TEST_SKIPPED;
}
#else
#include <sys/wait.h>
#include <dirent.h>
#include <rte_string_fns.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_kni.h>
#define NB_MBUF 8192
#define MAX_PACKET_SZ 2048
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
#define PKT_BURST_SZ 32
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ
#define SOCKET 0
#define NB_RXD 1024
#define NB_TXD 1024
#define KNI_TIMEOUT_MS 5000 /* ms */
#define IFCONFIG "/sbin/ifconfig "
#define TEST_KNI_PORT "test_kni_port"
#define KNI_MODULE_PATH "/sys/module/rte_kni"
#define KNI_MODULE_PARAM_LO KNI_MODULE_PATH"/parameters/lo_mode"
#define KNI_TEST_MAX_PORTS 4
/* The threshold number of mbufs to be transmitted or received. */
#define KNI_NUM_MBUF_THRESHOLD 100
static int kni_pkt_mtu = 0;
struct test_kni_stats {
volatile uint64_t ingress;
volatile uint64_t egress;
};
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
.rx_free_thresh = 0,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
};
static const struct rte_eth_conf port_conf = {
.txmode = {
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
static struct rte_kni_ops kni_ops = {
.change_mtu = NULL,
.config_network_if = NULL,
.config_mac_address = NULL,
.config_promiscusity = NULL,
};
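/*
 * The default ops leave all callbacks unset; test_kni_register_handler_mp()
 * registers kni_change_mtu() at run time to exercise request handling.
 */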
static unsigned int lcore_main, lcore_ingress, lcore_egress;
static struct rte_kni *test_kni_ctx;
static struct test_kni_stats stats;
static volatile uint32_t test_kni_processing_flag;
static struct rte_mempool *
test_kni_create_mempool(void)
{
struct rte_mempool * mp;
mp = rte_mempool_lookup("kni_mempool");
if (!mp)
mp = rte_pktmbuf_pool_create("kni_mempool",
NB_MBUF,
MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
SOCKET);
return mp;
}
static struct rte_mempool *
test_kni_lookup_mempool(void)
{
return rte_mempool_lookup("kni_mempool");
}
/* Callback for request of changing MTU */
static int
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
printf("Change MTU of port %d to %u\n", port_id, new_mtu);
kni_pkt_mtu = new_mtu;
printf("Change MTU of port %d to %i successfully.\n",
port_id, kni_pkt_mtu);
return 0;
}
static int
test_kni_link_change(void)
{
int ret;
int pid;
pid = fork();
if (pid < 0) {
printf("Error: Failed to fork a process\n");
return -1;
}
if (pid == 0) {
printf("Starting KNI Link status change tests.\n");
if (system(IFCONFIG TEST_KNI_PORT" up") == -1) {
ret = -1;
goto error;
}
ret = rte_kni_update_link(test_kni_ctx, 1);
if (ret < 0) {
printf("Failed to change link state to Up ret=%d.\n",
ret);
goto error;
}
rte_delay_ms(1000);
printf("KNI: Set LINKUP, previous state=%d\n", ret);
ret = rte_kni_update_link(test_kni_ctx, 0);
if (ret != 1) {
printf(
"Failed! Previous link state should be 1, returned %d.\n",
ret);
goto error;
}
rte_delay_ms(1000);
printf("KNI: Set LINKDOWN, previous state=%d\n", ret);
ret = rte_kni_update_link(test_kni_ctx, 1);
if (ret != 0) {
printf(
"Failed! Previous link state should be 0, returned %d.\n",
ret);
goto error;
}
printf("KNI: Set LINKUP, previous state=%d\n", ret);
ret = 0;
rte_delay_ms(1000);
error:
if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
ret = -1;
printf("KNI: Link status change tests: %s.\n",
(ret == 0) ? "Passed" : "Failed");
exit(ret);
} else {
int p_ret, status;
while (1) {
p_ret = waitpid(pid, &status, WNOHANG);
if (p_ret != 0) {
if (WIFEXITED(status))
return WEXITSTATUS(status);
return -1;
}
rte_delay_ms(10);
rte_kni_handle_request(test_kni_ctx);
}
}
}
/**
 * This loop fully tests the basic functions of KNI, e.g. transmitting
 * mbufs to and receiving mbufs from kernel space, and kernel requests.
 *
 * This is the loop to transmit/receive mbufs to/from the kernel interface
 * supported by the KNI kernel module. The ingress lcore will allocate mbufs
 * and transmit them to kernel space; the egress lcore will receive the mbufs
 * from kernel space and free them.
 * On the main lcore, several commands will be run to check the handling of
 * kernel requests. It will finally set the flag to exit the KNI
 * transmitting/receiving to/from the kernel space.
 *
 * Note: To support this testing, the KNI kernel module needs to be loaded
 * in one of its loopback modes.
 */
static int
test_kni_loop(__rte_unused void *arg)
{
int ret = 0;
unsigned nb_rx, nb_tx, num, i;
const unsigned lcore_id = rte_lcore_id();
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
if (lcore_id == lcore_main) {
rte_delay_ms(KNI_TIMEOUT_MS);
/* tests of handling kernel request */
if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
ret = -1;
if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
ret = -1;
if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
ret = -1;
rte_delay_ms(KNI_TIMEOUT_MS);
test_kni_processing_flag = 1;
} else if (lcore_id == lcore_ingress) {
struct rte_mempool *mp = test_kni_lookup_mempool();
if (mp == NULL)
return -1;
while (1) {
if (test_kni_processing_flag)
break;
for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
if (!pkts_burst[nb_rx])
break;
}
num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
nb_rx);
stats.ingress += num;
rte_kni_handle_request(test_kni_ctx);
if (num < nb_rx) {
for (i = num; i < nb_rx; i++) {
rte_pktmbuf_free(pkts_burst[i]);
}
}
rte_delay_ms(10);
}
} else if (lcore_id == lcore_egress) {
while (1) {
if (test_kni_processing_flag)
break;
num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
PKT_BURST_SZ);
stats.egress += num;
for (nb_tx = 0; nb_tx < num; nb_tx++)
rte_pktmbuf_free(pkts_burst[nb_tx]);
rte_delay_ms(10);
}
}
return ret;
}
static int
test_kni_allocate_lcores(void)
{
unsigned i, count = 0;
lcore_main = rte_get_main_lcore();
printf("main lcore: %u\n", lcore_main);
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (count >= 2)
break;
if (rte_lcore_is_enabled(i) && i != lcore_main) {
count++;
if (count == 1)
lcore_ingress = i;
else if (count == 2)
lcore_egress = i;
}
}
printf("count: %u\n", count);
return count == 2 ? 0 : -1;
}
static int
test_kni_register_handler_mp(void)
{
#define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
#define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
#define TEST_KNI_MTU 1450
#define TEST_KNI_MTU_STR " 1450"
int pid;
pid = fork();
if (pid < 0) {
printf("Failed to fork a process\n");
return -1;
} else if (pid == 0) {
int i;
struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
struct rte_kni_ops ops = {
.change_mtu = kni_change_mtu,
.config_network_if = NULL,
.config_mac_address = NULL,
.config_promiscusity = NULL,
};
if (!kni) {
printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
exit(-1);
}
kni_pkt_mtu = 0;
/* Check with the invalid parameters */
if (rte_kni_register_handlers(kni, NULL) == 0) {
printf("Unexpectedly register successfully "
"with NULL ops pointer\n");
exit(-1);
}
if (rte_kni_register_handlers(NULL, &ops) == 0) {
printf("Unexpectedly register successfully "
"to NULL KNI device pointer\n");
exit(-1);
}
if (rte_kni_register_handlers(kni, &ops)) {
printf("Fail to register ops\n");
exit(-1);
}
/* Check registering again after it has been registered */
if (rte_kni_register_handlers(kni, &ops) == 0) {
printf("Unexpectedly register successfully after "
"it has already been registered\n");
exit(-1);
}
/**
* Handle the request of setting MTU,
* with registered handlers.
*/
for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
rte_kni_handle_request(kni);
if (kni_pkt_mtu == TEST_KNI_MTU)
break;
rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
}
if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
printf("MTU has not been set\n");
exit(-1);
}
kni_pkt_mtu = 0;
if (rte_kni_unregister_handlers(kni) < 0) {
printf("Fail to unregister ops\n");
exit(-1);
}
/* Check with invalid parameter */
if (rte_kni_unregister_handlers(NULL) == 0) {
exit(-1);
}
/**
* Handle the request of setting MTU,
* without registered handlers.
*/
for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
rte_kni_handle_request(kni);
if (kni_pkt_mtu != 0)
break;
rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
}
if (kni_pkt_mtu != 0) {
printf("MTU shouldn't be set\n");
exit(-1);
}
exit(0);
} else {
int p_ret, status;
rte_delay_ms(1000);
if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
== -1)
return -1;
rte_delay_ms(1000);
if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
== -1)
return -1;
p_ret = wait(&status);
if (!WIFEXITED(status)) {
printf("Child process (%d) exit abnormally\n", p_ret);
return -1;
}
if (WEXITSTATUS(status) != 0) {
printf("Child process exit with failure\n");
return -1;
}
}
return 0;
}
static int
test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
{
int ret = 0;
unsigned i;
struct rte_kni *kni;
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
if (!mp)
return -1;
memset(&conf, 0, sizeof(conf));
memset(&info, 0, sizeof(info));
memset(&ops, 0, sizeof(ops));
ret = rte_eth_dev_info_get(port_id, &info);
if (ret != 0) {
printf("Error during getting device (port %u) info: %s\n",
port_id, strerror(-ret));
return -1;
}
snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
/* core id 1 configured for kernel thread */
conf.core_id = 1;
conf.force_bind = 1;
conf.mbuf_size = MAX_PACKET_SZ;
conf.group_id = port_id;
ops = kni_ops;
ops.port_id = port_id;
/* basic test of kni processing */
kni = rte_kni_alloc(mp, &conf, &ops);
if (!kni) {
printf("fail to create kni\n");
return -1;
}
test_kni_ctx = kni;
test_kni_processing_flag = 0;
stats.ingress = 0;
stats.egress = 0;
/**
* Check multiple processes support on
* registering/unregistering handlers.
*/
if (test_kni_register_handler_mp() < 0) {
printf("fail to check multiple process support\n");
ret = -1;
goto fail_kni;
}
ret = test_kni_link_change();
if (ret != 0)
goto fail_kni;
rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MAIN);
RTE_LCORE_FOREACH_WORKER(i) {
if (rte_eal_wait_lcore(i) < 0) {
ret = -1;
goto fail_kni;
}
}
/**
 * Check that the number of mbufs transmitted to kernel space and the
 * number received back from kernel space each reached the expected
 * threshold.
 */
if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
stats.egress < KNI_NUM_MBUF_THRESHOLD) {
printf("The ingress/egress number should not be "
"less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
ret = -1;
goto fail_kni;
}
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
return -1;
}
test_kni_ctx = NULL;
/* test of reusing memzone */
kni = rte_kni_alloc(mp, &conf, &ops);
if (!kni) {
printf("fail to create kni\n");
return -1;
}
/* Release the kni for following testing */
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
return -1;
}
return ret;
fail_kni:
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
ret = -1;
}
return ret;
}
static int
test_kni(void)
{
int ret = -1;
uint16_t port_id;
struct rte_kni *kni;
struct rte_mempool *mp;
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
FILE *fd;
DIR *dir;
char buf[16];
dir = opendir(KNI_MODULE_PATH);
if (!dir) {
if (errno == ENOENT) {
printf("Cannot run UT due to missing rte_kni module\n");
return TEST_SKIPPED;
}
printf("opendir: %s", strerror(errno));
return -1;
}
closedir(dir);
/* Initialize KNI subsystem */
ret = rte_kni_init(KNI_TEST_MAX_PORTS);
if (ret < 0) {
printf("fail to initialize KNI subsystem\n");
return -1;
}
if (test_kni_allocate_lcores() < 0) {
printf("No enough lcores for kni processing\n");
return -1;
}
mp = test_kni_create_mempool();
if (!mp) {
printf("fail to create mempool for kni\n");
return -1;
}
/* configuring port 0 for the test is enough */
port_id = 0;
ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
if (ret < 0) {
printf("fail to configure port %d\n", port_id);
return -1;
}
ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
if (ret < 0) {
printf("fail to setup rx queue for port %d\n", port_id);
return -1;
}
ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
if (ret < 0) {
printf("fail to setup tx queue for port %d\n", port_id);
return -1;
}
ret = rte_eth_dev_start(port_id);
if (ret < 0) {
printf("fail to start port %d\n", port_id);
return -1;
}
ret = rte_eth_promiscuous_enable(port_id);
if (ret != 0) {
printf("fail to enable promiscuous mode for port %d: %s\n",
port_id, rte_strerror(-ret));
return -1;
}
/* basic test of kni processing */
fd = fopen(KNI_MODULE_PARAM_LO, "r");
if (fd == NULL) {
printf("fopen: %s", strerror(errno));
return -1;
}
memset(&buf, 0, sizeof(buf));
if (fgets(buf, sizeof(buf), fd)) {
if (!strncmp(buf, "lo_mode_fifo", strlen("lo_mode_fifo")) ||
!strncmp(buf, "lo_mode_fifo_skb",
strlen("lo_mode_fifo_skb"))) {
ret = test_kni_processing(port_id, mp);
if (ret < 0) {
fclose(fd);
goto fail;
}
} else
printf("test_kni_processing skipped because of missing rte_kni module lo_mode argument\n");
}
fclose(fd);
/* test of allocating KNI with NULL mempool pointer */
memset(&info, 0, sizeof(info));
memset(&conf, 0, sizeof(conf));
memset(&ops, 0, sizeof(ops));
ret = rte_eth_dev_info_get(port_id, &info);
if (ret != 0) {
printf("Error during getting device (port %u) info: %s\n",
port_id, strerror(-ret));
return -1;
}
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
ops.port_id = port_id;
kni = rte_kni_alloc(NULL, &conf, &ops);
if (kni) {
ret = -1;
printf("unexpectedly creates kni successfully with NULL "
"mempool pointer\n");
goto fail;
}
/* test of allocating KNI without configurations */
kni = rte_kni_alloc(mp, NULL, NULL);
if (kni) {
ret = -1;
printf("Unexpectedly allocate KNI device successfully "
"without configurations\n");
goto fail;
}
/* test of allocating KNI without a name */
memset(&conf, 0, sizeof(conf));
memset(&info, 0, sizeof(info));
memset(&ops, 0, sizeof(ops));
ret = rte_eth_dev_info_get(port_id, &info);
if (ret != 0) {
printf("Error during getting device (port %u) info: %s\n",
port_id, strerror(-ret));
ret = -1;
goto fail;
}
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
ops.port_id = port_id;
kni = rte_kni_alloc(mp, &conf, &ops);
if (kni) {
ret = -1;
printf("Unexpectedly allocate a KNI device successfully "
"without a name\n");
goto fail;
}
/* test of releasing NULL kni context */
ret = rte_kni_release(NULL);
if (ret == 0) {
ret = -1;
printf("unexpectedly release kni successfully\n");
goto fail;
}
/* test of handling request on NULL device pointer */
ret = rte_kni_handle_request(NULL);
if (ret == 0) {
ret = -1;
printf("Unexpectedly handle request on NULL device pointer\n");
goto fail;
}
/* test of getting KNI device with pointer to NULL */
kni = rte_kni_get(NULL);
if (kni) {
ret = -1;
printf("Unexpectedly get a KNI device with "
"NULL name pointer\n");
goto fail;
}
/* test of getting KNI device with a zero-length name string */
memset(&conf, 0, sizeof(conf));
kni = rte_kni_get(conf.name);
if (kni) {
ret = -1;
printf("Unexpectedly get a KNI device with "
"zero length name string\n");
goto fail;
}
/* test of getting KNI device with an invalid string name */
memset(&conf, 0, sizeof(conf));
snprintf(conf.name, sizeof(conf.name), "testing");
kni = rte_kni_get(conf.name);
if (kni) {
ret = -1;
printf("Unexpectedly get a KNI device with "
"a never used name string\n");
goto fail;
}
ret = 0;
fail:
if (rte_eth_dev_stop(port_id) != 0)
printf("Failed to stop port %u\n", port_id);
return ret;
}
#endif
REGISTER_TEST_COMMAND(kni_autotest, test_kni);

@@ -1,28 +0,0 @@
#!/bin/sh -e
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Red Hat, Inc.
which jq || {
    echo "No jq available, skipping test."
    exit 77
}
rootdir=$(readlink -f $(dirname $(readlink -f $0))/../..)
tmpoutput=$(mktemp -t dpdk.test_telemetry.XXXXXX)
trap "cat $tmpoutput; rm -f $tmpoutput" EXIT
call_all_telemetry() {
    telemetry_script=$rootdir/usertools/dpdk-telemetry.py
    echo >$tmpoutput
    echo "Telemetry commands log:" >>$tmpoutput
    for cmd in $(echo / | $telemetry_script | jq -r '.["/"][]')
    do
        for input in $cmd $cmd,0 $cmd,z
        do
            echo Calling $input >> $tmpoutput
            echo $input | $telemetry_script >> $tmpoutput 2>&1
        done
    done
}
(sleep 1 && call_all_telemetry && echo quit) | $@

@@ -1,21 +0,0 @@
#! /usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2020 Intel Corporation
import subprocess
import sys
import tempfile
objdump, *cc = sys.argv[1:]
with tempfile.NamedTemporaryFile() as obj:
    # On Windows, the file is opened exclusively and is not writable.
    obj.close()
    # from https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90028
    gather_params = '0x8(,%ymm1,1),%ymm0{%k2}'
    src = '__asm__("vpgatherqq {}");'.format(gather_params).encode('utf-8')
    subprocess.run(cc + ['-c', '-xc', '-o', obj.name, '-'], input=src, check=True)
    asm = subprocess.run([objdump, '-d', '--no-show-raw-insn', obj.name],
                         stdout=subprocess.PIPE, check=True).stdout.decode('utf-8')
    if gather_params not in asm:
        print('vpgatherqq displacement error with as')
        sys.exit(1)

@@ -1,12 +0,0 @@
@@
expression cond, ret;
@@
-RTE_FUNC_PTR_OR_ERR_RET(cond, ret);
+if (cond == NULL)
+ return ret;
@@
expression cond;
@@
-RTE_FUNC_PTR_OR_RET(cond);
+if (cond == NULL)
+ return;
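// Illustration (hypothetical call site, not part of the patch): the first
// rule above rewrites
//     RTE_FUNC_PTR_OR_ERR_RET(ops->configure, -ENOTSUP);
// into
//     if (ops->configure == NULL)
//             return -ENOTSUP;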

@@ -1,26 +0,0 @@
#!/bin/sh -e
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2019 Red Hat, Inc.
if [ $# != 1 ]; then
    echo "Usage: $0 installdir" >&2
    exit 1
fi
installdir=$1
if [ ! -d $installdir ]; then
    echo "Error: install directory '$installdir' does not exist." >&2
    exit 1
fi
dumpdir=$installdir/dump
rm -rf $dumpdir
mkdir -p $dumpdir
for f in $(find $installdir -name "*.so.*"); do
    if test -L $f; then
        continue
    fi
    libname=$(basename $f)
    abidw --out-file $dumpdir/${libname%.so*}.dump $f
done

@@ -1,260 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2022 Intel Corporation
.. include:: <isonum.txt>
Intel\ |reg| ACC200 vRAN Dedicated Accelerator Poll Mode Driver
===============================================================
The Intel\ |reg| vRAN Dedicated Accelerator ACC200 peripheral enables
cost-effective 4G and 5G next-generation virtualized Radio Access Network (vRAN)
solutions, integrated on the Sapphire Rapids Edge Enhanced (SPR-EE)
Intel\ |reg| 7 based Xeon\ |reg| multi-core server processor.
Features
--------
The ACC200 includes a 5G Low Density Parity Check (LDPC) encoder/decoder,
rate match/dematch, Hybrid Automatic Repeat Request (HARQ) with access to DDR
memory for buffer management, a 4G Turbo encoder/decoder,
a Fast Fourier Transform (FFT) block providing DFT/iDFT processing offload
for the 5G Sounding Reference Signal (SRS), a Queue Manager (QMGR),
and a DMA subsystem.
There is no dedicated on-card memory for HARQ; coherent memory
on the CPU side is used instead.
These correspond to the following features exposed by the PMD:
- LDPC Encode in the Downlink (5GNR)
- LDPC Decode in the Uplink (5GNR)
- Turbo Encode in the Downlink (4G)
- Turbo Decode in the Uplink (4G)
- FFT processing
- SR-IOV with 16 VFs per PF
- Maximum of 256 queues per VF
- MSI
The ACC200 PMD supports the following bbdev capabilities
(see the capability-query sketch after these lists):
* For the LDPC encode operation:
- ``RTE_BBDEV_LDPC_CRC_24B_ATTACH``: set to attach CRC24B to CB(s).
- ``RTE_BBDEV_LDPC_RATE_MATCH``: if set then do not do Rate Match bypass.
- ``RTE_BBDEV_LDPC_INTERLEAVER_BYPASS``: if set then bypass interleaver.
* For the LDPC decode operation:
- ``RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK``: check CRC24B from CB(s).
- ``RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP``: drops CRC24B bits appended while decoding.
- ``RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK``: check CRC24A from CB(s).
- ``RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK``: check CRC16 from CB(s).
- ``RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE``: provides an input for HARQ combining.
- ``RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE``: provides an output for HARQ combining.
- ``RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE``: disable early termination.
- ``RTE_BBDEV_LDPC_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data.
- ``RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION``: supports compression of the HARQ input/output.
- ``RTE_BBDEV_LDPC_LLR_COMPRESSION``: supports LLR input compression.
* For the turbo encode operation:
- ``RTE_BBDEV_TURBO_CRC_24B_ATTACH``: set to attach CRC24B to CB(s).
- ``RTE_BBDEV_TURBO_RATE_MATCH``: if set then do not do Rate Match bypass.
- ``RTE_BBDEV_TURBO_ENC_INTERRUPTS``: set for encoder dequeue interrupts.
- ``RTE_BBDEV_TURBO_RV_INDEX_BYPASS``: set to bypass RV index.
- ``RTE_BBDEV_TURBO_ENC_SCATTER_GATHER``: supports scatter-gather for input/output data.
* For the turbo decode operation:
- ``RTE_BBDEV_TURBO_CRC_TYPE_24B``: check CRC24B from CB(s).
- ``RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE``: perform subblock de-interleave.
- ``RTE_BBDEV_TURBO_DEC_INTERRUPTS``: set for decoder dequeue interrupts.
- ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN``: set if negative LLR input is supported.
- ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP``: keep CRC24B bits appended while decoding.
- ``RTE_BBDEV_TURBO_DEC_CRC_24B_DROP``: option to drop the code block CRC after decoding.
- ``RTE_BBDEV_TURBO_EARLY_TERMINATION``: set early termination feature.
- ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data.
- ``RTE_BBDEV_TURBO_HALF_ITERATION_EVEN``: set half iteration granularity.
- ``RTE_BBDEV_TURBO_SOFT_OUTPUT``: set the APP LLR soft output.
- ``RTE_BBDEV_TURBO_EQUALIZER``: set the turbo equalizer feature.
- ``RTE_BBDEV_TURBO_SOFT_OUT_SATURATE``: set the soft output saturation.
- ``RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH``: set to run an extra odd iteration after CRC match.
- ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT``: set if negative APP LLR output supported.
- ``RTE_BBDEV_TURBO_MAP_DEC``: supports flexible parallel MAP engine decoding.
* For the FFT operation:
- ``RTE_BBDEV_FFT_WINDOWING``: flexible windowing capability.
- ``RTE_BBDEV_FFT_CS_ADJUSTMENT``: flexible adjustment of Cyclic Shift time offset.
- ``RTE_BBDEV_FFT_DFT_BYPASS``: set to bypass the DFT and feed the iDFT input directly.
- ``RTE_BBDEV_FFT_IDFT_BYPASS``: set to bypass the iDFT and get the DFT output directly.
- ``RTE_BBDEV_FFT_WINDOWING_BYPASS``: set to bypass time-domain windowing.
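
The sketch below shows how an application can discover at run time which of
these capability flags a device actually exposes. It uses only the generic
bbdev API (``rte_bbdev_info_get()`` and the driver capability list), so it is
a hedged, generic example rather than ACC200-specific code; the device ID and
error handling are elided.

.. code-block:: c

   #include <rte_bbdev.h>

   /* Return the LDPC decode capability flags advertised by a bbdev device. */
   static uint32_t
   get_ldpc_dec_flags(uint16_t dev_id)
   {
           struct rte_bbdev_info info;
           const struct rte_bbdev_op_cap *cap;

           rte_bbdev_info_get(dev_id, &info);
           /* The driver capability list is terminated by RTE_BBDEV_OP_NONE. */
           for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
                   if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
                           return cap->cap.ldpc_dec.capability_flags;
           return 0;
   }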
Installation
------------

Section 3 of the DPDK manual provides instructions on installing and compiling DPDK.
DPDK requires hugepages to be configured as detailed in section 2 of the DPDK manual.
The bbdev test application has been tested with a configuration of 40 x 1GB hugepages.
The hugepage configuration of a server may be examined using:

.. code-block:: console

   grep Huge* /proc/meminfo
Initialization
--------------

When the device first powers up, its PCI Physical Functions (PF)
can be listed through this command for ACC200:

.. code-block:: console

   sudo lspci -vd8086:57c0

The physical and virtual functions are compatible with Linux UIO drivers:
``vfio`` and ``igb_uio``.
However, in order to work, the 5G/4G FEC device first needs to be bound
to one of these Linux drivers through DPDK.
Bind PF UIO driver(s)
~~~~~~~~~~~~~~~~~~~~~

Install the DPDK igb_uio driver, bind it with the PF PCI device ID and use
``lspci`` to confirm that the PF device is in use by the ``igb_uio`` DPDK UIO driver.

The igb_uio driver may be bound to the PF PCI device using one of two methods
for ACC200:

#. PCI functions (physical or virtual, depending on the use case) can be bound
   to the UIO driver by repeating this command for every function:

   .. code-block:: console

      cd <dpdk-top-level-directory>
      insmod build/kmod/igb_uio.ko
      echo "8086 57c0" > /sys/bus/pci/drivers/igb_uio/new_id
      lspci -vd8086:57c0

#. Another way to bind the PF with the DPDK UIO driver is by using the
   ``dpdk-devbind.py`` tool:

   .. code-block:: console

      cd <dpdk-top-level-directory>
      usertools/dpdk-devbind.py -b igb_uio 0000:f7:00.0

   where the PCI device ID (example: 0000:f7:00.0) is obtained using
   ``lspci -vd8086:57c0``.

In a similar way the PF may be bound with vfio-pci as any PCIe device.
Enable Virtual Functions
~~~~~~~~~~~~~~~~~~~~~~~~

Now, it should be visible in the printouts that the PCI PF is under igb_uio control:
"``Kernel driver in use: igb_uio``".

To show the number of available VFs on the device, read the ``sriov_totalvfs`` file:

.. code-block:: console

   cat /sys/bus/pci/devices/0000\:<b>\:<d>.<f>/sriov_totalvfs

where ``0000\:<b>\:<d>.<f>`` is the PCI device ID.

To enable VFs via igb_uio, echo the number of virtual functions intended
to be enabled to the ``max_vfs`` file:

.. code-block:: console

   echo <num-of-vfs> > /sys/bus/pci/devices/0000\:<b>\:<d>.<f>/max_vfs

Afterwards, all VFs must be bound to appropriate UIO drivers as required,
the same way it was done with the physical function previously.

Enabling SR-IOV via the VFIO driver is pretty much the same,
except that the file name is different:

.. code-block:: console

   echo <num-of-vfs> > /sys/bus/pci/devices/0000\:<b>\:<d>.<f>/sriov_numvfs
Configure the VFs through PF
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The PCI virtual functions must be configured before use or before being assigned
to VMs/Containers.
The configuration involves allocating the number of hardware queues, priorities,
load balance, bandwidth and other settings necessary for the device
to perform FEC functions.

This configuration needs to be executed at least once after reboot or PCI FLR
and can be achieved by using the function ``rte_acc200_configure()``,
which sets up the parameters defined in the compatible ``acc200_conf`` structure.
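
A minimal sketch of that call is shown below. Only ``rte_acc200_configure()``
and the ``acc200_conf`` structure are named in this guide; the header name,
the PCI address and the zero-initialised (default) configuration are
illustrative assumptions, and the exact structure fields depend on the DPDK
version in use.

.. code-block:: c

   #include <stdio.h>
   #include <rte_acc200_cfg.h>  /* assumed header name for this PMD's config API */

   /* Sketch: apply a default (zero-initialised) configuration to the PF.
    * A real deployment fills in queue group, priority and bandwidth fields. */
   struct acc200_conf conf = {0};
   int ret = rte_acc200_configure("0000:f7:00.0", &conf);  /* address from lspci */
   if (ret < 0)
           printf("ACC200 PF configuration failed: %d\n", ret);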
Test Application
----------------

The bbdev class is provided with a test application, ``test-bbdev.py``,
and a range of test data for testing the functionality of the device,
depending on the device's capabilities.
The test application is located under the app/test-bbdev folder
and has the following options:

.. code-block:: console

   "-p", "--testapp-path": specifies path to the bbdev test app.
   "-e", "--eal-params": EAL arguments which are passed to the test app.
   "-t", "--timeout": Timeout in seconds (default=300).
   "-c", "--test-cases": Defines test cases to run. Run all if not specified.
   "-v", "--test-vector": Test vector path.
   "-n", "--num-ops": Number of operations to process on device (default=32).
   "-b", "--burst-size": Operations enqueue/dequeue burst size (default=32).
   "-s", "--snr": SNR in dB used when generating LLRs for bler tests.
   "-s", "--iter_max": Number of iterations for LDPC decoder.
   "-l", "--num-lcores": Number of lcores to run (default=16).
   "-i", "--init-device": Initialise PF device with default values.

To execute the test application tool using simple decode or encode data,
type one of the following:

.. code-block:: console

   ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_dec_default.data
   ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_enc_default.data

The test application ``test-bbdev.py`` supports the ability to configure the
PF device with a default set of values, if the "-i" or "--init-device" option
is included. The default values are defined in test_bbdev_perf.c.
Test Vectors
~~~~~~~~~~~~

In addition to the simple LDPC decoder and LDPC encoder tests,
bbdev also provides a range of additional tests under the test_vectors folder,
which may be useful.
The results of these tests will depend on the device capabilities, which may
cause some test cases to be skipped, but no failure should be reported.

Alternate Baseband Device configuration tool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On top of the embedded configuration feature supported in test-bbdev using
the "--init-device" option mentioned above, there is also a tool available
to perform that device configuration using a companion application.
The ``pf_bb_config`` application notably enables running bbdev-test
from the VF, and not only from the PF as captured above.
See https://github.com/intel/pf-bb-config for more details.

Specifically for the bbdev ACC200 PMD, the commands below can be used:

.. code-block:: console

   pf_bb_config ACC200 -c ./acc200/acc200_config_vf_5g.cfg
   test-bbdev.py -e="-c 0xff0 -a${VF_PCI_ADDR}" -c validation -n 64 -b 64 -l 1 -v ./ldpc_dec_default.data

@@ -1,14 +0,0 @@
;
; Supported features of the 'acc200' bbdev driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Turbo Decoder (4G) = Y
Turbo Encoder (4G) = Y
LDPC Decoder (5G) = Y
LDPC Encoder (5G) = Y
LLR/HARQ Compression = Y
FFT/SRS = Y
External DDR Access = N
HW Accelerated = Y

@@ -1,29 +0,0 @@
;
; Supported features of the 'LiquidIO' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Speed capabilities = Y
Link status = Y
Link status event = Y
MTU update = Y
Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
CRC offload = Y
VLAN offload = P
L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Basic stats = Y
Extended stats = Y
Multiprocess aware = Y
Linux = Y
x86-64 = Y
Usage doc = Y

@@ -1,170 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2017 Intel Corporation.
KNI Poll Mode Driver
======================
The KNI PMD is a wrapper to the :ref:`librte_kni <kni>` library.
This PMD enables using KNI without having a KNI specific application;
any forwarding application can use the PMD interface for KNI.

Sending packets to any DPDK controlled interface or sending to the
Linux networking stack will be transparent to the DPDK application.

To create a KNI device, the ``net_kni#`` device name should be used; this
will create a ``kni#`` Linux virtual network interface.

There is no physical device backend for the virtual KNI device.

Packets sent to the KNI Linux interface will be received by the DPDK
application, and the DPDK application may forward packets to a physical NIC
or to a virtual device (like another KNI interface or a PCAP interface).

To forward any traffic from a physical NIC to the Linux networking stack,
an application should control a physical port, create one virtual KNI port,
and forward between the two.

Using this PMD requires the KNI kernel module to be inserted.
Usage
-----

The EAL ``--vdev`` argument can be used to create a KNI device instance, like::

   dpdk-testpmd --vdev=net_kni0 --vdev=net_kni1 -- -i

The above command will create ``kni0`` and ``kni1`` Linux network interfaces;
those interfaces can be controlled by standard Linux tools.

When testpmd forwarding starts, any packets sent to the ``kni0`` interface are
forwarded to the ``kni1`` interface and vice versa.

There is no hard limit on the number of interfaces that can be created.
Default interface configuration
-------------------------------

``librte_kni`` can create Linux network interfaces with different features;
the feature set is controlled by a configuration struct, and the KNI PMD uses
a fixed configuration:

.. code-block:: console

   Interface name: kni#
   force bind kernel thread to a core : NO
   mbuf size: (rte_pktmbuf_data_room_size(pktmbuf_pool) - RTE_PKTMBUF_HEADROOM)
   mtu: (conf.mbuf_size - RTE_ETHER_HDR_LEN)

The KNI control path is not supported with the PMD, since there is no physical
backend device by default.
PMD arguments
-------------

``no_request_thread``: by default, the PMD creates a pthread for each KNI
interface to handle Linux network interface control commands, like
``ifconfig kni0 up``.
With the ``no_request_thread`` option, the pthread is not created and control
commands are not handled by the PMD.

By default the request thread is enabled, and this argument should not be used
most of the time, unless this PMD is used with a customized DPDK application
that handles requests itself.

Argument usage::

   dpdk-testpmd --vdev "net_kni0,no_request_thread=1" -- -i
PMD log messages
----------------

If the KNI kernel module (rte_kni.ko) is not inserted, the following error log
is printed::

   "KNI: KNI subsystem has not been initialized. Invoke rte_kni_init() first"
PMD testing
-----------

It is possible to test the PMD quickly using the KNI kernel module loopback
feature:

* Insert the KNI kernel module with loopback support:

  .. code-block:: console

     insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo_skb

* Start testpmd with no physical device but two KNI virtual devices:

  .. code-block:: console

     ./dpdk-testpmd --vdev net_kni0 --vdev net_kni1 -- -i

  .. code-block:: console

     ...
     Configuring Port 0 (socket 0)
     KNI: pci: 00:00:00 c580:b8
     Port 0: 1A:4A:5B:7C:A2:8C
     Configuring Port 1 (socket 0)
     KNI: pci: 00:00:00 600:b9
     Port 1: AE:95:21:07:93:DD
     Checking link statuses...
     Port 0 Link Up - speed 10000 Mbps - full-duplex
     Port 1 Link Up - speed 10000 Mbps - full-duplex
     Done
     testpmd>

* Observe the Linux interfaces:

  .. code-block:: console

     $ ifconfig kni0 && ifconfig kni1
     kni0: flags=4098<BROADCAST,MULTICAST> mtu 1500
         ether ae:8e:79:8e:9b:c8 txqueuelen 1000 (Ethernet)
         RX packets 0 bytes 0 (0.0 B)
         RX errors 0 dropped 0 overruns 0 frame 0
         TX packets 0 bytes 0 (0.0 B)
         TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
     kni1: flags=4098<BROADCAST,MULTICAST> mtu 1500
         ether 9e:76:43:53:3e:9b txqueuelen 1000 (Ethernet)
         RX packets 0 bytes 0 (0.0 B)
         RX errors 0 dropped 0 overruns 0 frame 0
         TX packets 0 bytes 0 (0.0 B)
         TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

* Start forwarding with tx_first:

  .. code-block:: console

     testpmd> start tx_first

* Quit and check forwarding stats:

  .. code-block:: console

     testpmd> quit
     Telling cores to stop...
     Waiting for lcores to finish...

     ---------------------- Forward statistics for port 0 ----------------------
     RX-packets: 35637905 RX-dropped: 0 RX-total: 35637905
     TX-packets: 35637947 TX-dropped: 0 TX-total: 35637947
     ----------------------------------------------------------------------------

     ---------------------- Forward statistics for port 1 ----------------------
     RX-packets: 35637915 RX-dropped: 0 RX-total: 35637915
     TX-packets: 35637937 TX-dropped: 0 TX-total: 35637937
     ----------------------------------------------------------------------------

     +++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
     RX-packets: 71275820 RX-dropped: 0 RX-total: 71275820
     TX-packets: 71275884 TX-dropped: 0 TX-total: 71275884
     ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

@@ -1,169 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2017 Cavium, Inc
LiquidIO VF Poll Mode Driver
============================
The LiquidIO VF PMD library (**librte_net_liquidio**) provides poll mode driver support for
Cavium LiquidIO® II server adapter VFs. PF management and VF creation can be
done using the kernel driver.
More information can be found at `Cavium Official Website
<http://cavium.com/LiquidIO_Adapters.html>`_.
Supported LiquidIO Adapters
-----------------------------
- LiquidIO II CN2350 210SV/225SV
- LiquidIO II CN2350 210SVPT
- LiquidIO II CN2360 210SV/225SV
- LiquidIO II CN2360 210SVPT
SR-IOV: Prerequisites and Sample Application Notes
--------------------------------------------------
This section provides instructions for configuring SR-IOV on a Linux OS.
#. Verify SR-IOV and ARI capabilities are enabled on the adapter using ``lspci``:
.. code-block:: console
lspci -s <slot> -vvv
Example output:
.. code-block:: console
[...]
Capabilities: [148 v1] Alternative Routing-ID Interpretation (ARI)
[...]
Capabilities: [178 v1] Single Root I/O Virtualization (SR-IOV)
[...]
Kernel driver in use: LiquidIO
#. Load the kernel module:
.. code-block:: console
modprobe liquidio
#. Bring up the PF ports:
.. code-block:: console
ifconfig p4p1 up
ifconfig p4p2 up
#. Change PF MTU if required:
.. code-block:: console
ifconfig p4p1 mtu 9000
ifconfig p4p2 mtu 9000
#. Create VF device(s):
Echo the number of VFs to be created into the ``sriov_numvfs`` sysfs entry
of the parent PF.
.. code-block:: console
echo 1 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
echo 1 > /sys/bus/pci/devices/0000:03:00.1/sriov_numvfs
#. Assign VF MAC address:
Assign a MAC address to the VF using the iproute2 utility. The syntax is::
ip link set <PF iface> vf <VF id> mac <macaddr>
Example:
.. code-block:: console
ip link set p4p1 vf 0 mac F2:A8:1B:5E:B4:66
#. Assign VF(s) to VM.
The VF devices may be passed through to the guest VM using QEMU,
virt-manager, virsh, etc.
Example qemu guest launch command:
.. code-block:: console
./qemu-system-x86_64 -name lio-vm -machine accel=kvm \
-cpu host -m 4096 -smp 4 \
-drive file=<disk_file>,if=none,id=disk1,format=<type> \
-device virtio-blk-pci,scsi=off,drive=disk1,id=virtio-disk1,bootindex=1 \
-device vfio-pci,host=03:00.3 -device vfio-pci,host=03:08.3
#. Running testpmd
Refer to the document
:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run
the ``testpmd`` application.
.. note::
Use ``igb_uio`` instead of ``vfio-pci`` in VM.
Example output:
.. code-block:: console
[...]
EAL: PCI device 0000:03:00.3 on NUMA socket 0
EAL: probe driver: 177d:9712 net_liovf
EAL: using IOMMU type 1 (Type 1)
PMD: net_liovf[03:00.3]INFO: DEVICE : CN23XX VF
EAL: PCI device 0000:03:08.3 on NUMA socket 0
EAL: probe driver: 177d:9712 net_liovf
PMD: net_liovf[03:08.3]INFO: DEVICE : CN23XX VF
Interactive-mode selected
USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0
Configuring Port 0 (socket 0)
PMD: net_liovf[03:00.3]INFO: Starting port 0
Port 0: F2:A8:1B:5E:B4:66
Configuring Port 1 (socket 0)
PMD: net_liovf[03:08.3]INFO: Starting port 1
Port 1: 32:76:CC:EE:56:D7
Checking link statuses...
Port 0 Link Up - speed 10000 Mbps - full-duplex
Port 1 Link Up - speed 10000 Mbps - full-duplex
Done
testpmd>
#. Enabling VF promiscuous mode
One VF per PF can be marked as trusted for promiscuous mode.
.. code-block:: console
ip link set dev <PF iface> vf <VF id> trust on
Limitations
-----------
VF MTU
~~~~~~
The VF MTU is limited by the PF MTU. Raise the PF value before configuring the VF for larger packet sizes.
VLAN offload
~~~~~~~~~~~~
Tx VLAN insertion is not supported; consequently, the VLAN offload feature is
marked partial.
Ring size
~~~~~~~~~
The number of descriptors for the Rx/Tx rings should be in the range 128 to 512.
CRC stripping
~~~~~~~~~~~~~
LiquidIO adapters strip the Ethernet FCS of every packet coming to the host interface.

@@ -1,424 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2017 Intel Corporation.
Flow Classification Library
===========================
.. note::
The Flow Classification library is deprecated and will be removed in the future.
See :doc:`../rel_notes/deprecation`.
It is disabled by default in the DPDK build.
To re-enable the library, remove 'flow_classify' from the "disable_libs"
meson option when configuring a build.
DPDK provides a Flow Classification library that provides the ability
to classify an input packet by matching it against a set of Flow rules.
The initial implementation supports counting of IPv4 5-tuple packets which match
a particular Flow rule only.
Please refer to the
:doc:`./rte_flow`
for more information.
The Flow Classification library uses the ``librte_table`` API for managing Flow
rules and matching packets against the Flow rules.
The library is table agnostic and can use the following tables:
``Access Control List``, ``Hash`` and ``Longest Prefix Match (LPM)``.
The ``Access Control List`` table is used in the initial implementation.
Please refer to the
:doc:`./packet_framework`
for more information on ``librte_table``.
DPDK provides an Access Control List library that provides the ability to
classify an input packet based on a set of classification rules.
Please refer to the
:doc:`./packet_classif_access_ctrl`
library for more information on ``librte_acl``.
There is also a Flow Classify sample application which demonstrates the use of
the Flow Classification Library APIs.
Please refer to the
:doc:`../sample_app_ug/flow_classify`
for more information on the ``flow_classify`` sample application.
Overview
--------
The library has the following APIs:
.. code-block:: c
/**
* Flow classifier create
*
* @param params
* Parameters for flow classifier creation
* @return
* Handle to flow classifier instance on success or NULL otherwise
*/
struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params);
/**
* Flow classifier free
*
* @param cls
* Handle to flow classifier instance
* @return
* 0 on success, error code otherwise
*/
int
rte_flow_classifier_free(struct rte_flow_classifier *cls);
/**
* Flow classify table create
*
* @param cls
* Handle to flow classifier instance
* @param params
* Parameters for flow_classify table creation
* @return
* 0 on success, error code otherwise
*/
int
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params);
/**
* Validate the flow classify rule
*
* @param[in] cls
* Handle to flow classifier instance
* @param[in] attr
* Flow rule attributes
* @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END pattern item).
* @param[out] error
* Perform verbose error reporting if not NULL. Structure
* initialised in case of error only.
* @return
* 0 on success, error code otherwise
*/
int
rte_flow_classify_validate(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
/**
* Add a flow classify rule to the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
* @param[in] attr
* Flow rule attributes
* @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END pattern item).
* @param[out] key_found
* returns 1 if rule present already, 0 otherwise.
* @param[out] error
* Perform verbose error reporting if not NULL. Structure
* initialised in case of error only.
* @return
* A valid handle in case of success, NULL otherwise.
*/
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
int *key_found,
struct rte_flow_error *error);
/**
* Delete a flow classify rule from the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
* @param[in] rule
* Flow classify rule
* @return
* 0 on success, error code otherwise.
*/
int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
struct rte_flow_classify_rule *rule);
/**
* Query flow classifier for given rule.
*
* @param[in] cls
* Flow classifier handle
* @param[in] pkts
* Pointer to packets to process
* @param[in] nb_pkts
* Number of packets to process
* @param[in] rule
* Flow classify rule
* @param[in] stats
* Flow classify stats
*
* @return
* 0 on success, error code otherwise.
*/
int
rte_flow_classifier_query(struct rte_flow_classifier *cls,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
struct rte_flow_classify_rule *rule,
struct rte_flow_classify_stats *stats);
Classifier creation
~~~~~~~~~~~~~~~~~~~
The application creates the ``Classifier`` using the
``rte_flow_classifier_create`` API.
The ``rte_flow_classifier_params`` structure must be initialised by the
application before calling the API.
.. code-block:: c
struct rte_flow_classifier_params {
/** flow classifier name */
const char *name;
/** CPU socket ID where memory for the flow classifier and its */
/** elements (tables) should be allocated */
int socket_id;
};
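A minimal creation sketch (assuming allocations on NUMA socket 0; the
classifier name and the error handling are application choices):

.. code-block:: c

    #include <stdlib.h>
    #include <rte_debug.h>
    #include <rte_flow_classify.h>

    /* Sketch only: create a classifier on socket 0. */
    struct rte_flow_classifier_params cls_params = {
        .name = "flow_classifier", /* application-chosen name */
        .socket_id = 0,            /* NUMA socket for allocations */
    };
    struct rte_flow_classifier *cls;

    cls = rte_flow_classifier_create(&cls_params);
    if (cls == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create classifier\n");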
The ``Classifier`` has the following internal structures:
.. code-block:: c
struct rte_cls_table {
/* Input parameters */
struct rte_table_ops ops;
uint32_t entry_size;
enum rte_flow_classify_table_type type;
/* Handle to the low-level table object */
void *h_table;
};
#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
struct rte_flow_classifier {
/* Input parameters */
char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
int socket_id;
/* Internal */
/* ntuple_filter */
struct rte_eth_ntuple_filter ntuple_filter;
/* classifier tables */
struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
uint32_t table_mask;
uint32_t num_tables;
uint16_t nb_pkts;
struct rte_flow_classify_table_entry
*entries[RTE_PORT_IN_BURST_SIZE_MAX];
} __rte_cache_aligned;
Adding a table to the Classifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The application adds a table to the ``Classifier`` using the
``rte_flow_classify_table_create`` API.
The ``rte_flow_classify_table_params`` structure must be initialised by the
application before calling the API.
.. code-block:: c
struct rte_flow_classify_table_params {
/** Table operations (specific to each table type) */
struct rte_table_ops *ops;
/** Opaque param to be passed to the table create operation */
void *arg_create;
/** Classifier table type */
enum rte_flow_classify_table_type type;
};
To create an ACL table the ``rte_table_acl_params`` structure must be
initialised and assigned to ``arg_create`` in the
``rte_flow_classify_table_params`` structure.
.. code-block:: c
struct rte_table_acl_params {
/** Name */
const char *name;
/** Maximum number of ACL rules in the table */
uint32_t n_rules;
/** Number of fields in the ACL rule specification */
uint32_t n_rule_fields;
/** Format specification of the fields of the ACL rule */
struct rte_acl_field_def field_format[RTE_ACL_MAX_FIELDS];
};
The fields for the ACL rule must also be initialised by the application.
An ACL table can be added to the ``Classifier`` for each ACL rule format; for
example, another table could be added for IPv6 5-tuple rules.
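A hedged sketch of that sequence, assuming ``ipv4_defs`` is an
application-defined ``struct rte_acl_field_def`` array describing the IPv4
5-tuple and ``cls`` is the classifier created above:

.. code-block:: c

    #include <string.h>
    #include <rte_common.h>
    #include <rte_table_acl.h>
    #include <rte_flow_classify.h>

    /* Sketch only: build an IPv4 5-tuple ACL table and attach it to 'cls'. */
    struct rte_table_acl_params table_acl_params = {
        .name = "table_acl_ipv4_5tuple",
        .n_rules = 1000,                     /* table capacity */
        .n_rule_fields = RTE_DIM(ipv4_defs), /* number of 5-tuple fields */
    };
    memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

    struct rte_flow_classify_table_params cls_table_params = {
        .ops = &rte_table_acl_ops,
        .arg_create = &table_acl_params,
        .type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE,
    };

    if (rte_flow_classify_table_create(cls, &cls_table_params) != 0)
        rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");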
Flow Parsing
~~~~~~~~~~~~
The library currently supports three IPv4 5-tuple flow patterns, for UDP, TCP
and SCTP.
.. code-block:: c
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
RTE_FLOW_ITEM_TYPE_END,
};
The API function ``rte_flow_classify_validate`` parses the
IPv4 5-tuple pattern, attributes and actions and returns the 5-tuple data in the
``rte_eth_ntuple_filter`` structure.
.. code-block:: c
int
rte_flow_classify_validate(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
Adding Flow Rules
~~~~~~~~~~~~~~~~~
The ``rte_flow_classify_table_entry_add`` API creates an
``rte_flow_classify`` object which contains the flow_classify id and type, the
action, a union of add and delete keys and a union of rules.
It uses the ``rte_flow_classify_validate`` API function for parsing the
flow parameters.
The 5-tuple ACL key data is obtained from the ``rte_eth_ntuple_filter``
structure populated by the ``classify_parse_ntuple_filter`` function which
parses the Flow rule.
.. code-block:: c
struct acl_keys {
struct rte_table_acl_rule_add_params key_add; /* add key */
struct rte_table_acl_rule_delete_params key_del; /* delete key */
};
struct classify_rules {
enum rte_flow_classify_rule_type type;
union {
struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
} u;
};
struct rte_flow_classify {
uint32_t id; /* unique ID of classify object */
enum rte_flow_classify_table_type tbl_type; /* rule table */
struct classify_rules rules; /* union of rules */
union {
struct acl_keys key;
} u;
int key_found; /* rule key found in table */
struct rte_flow_classify_table_entry entry; /* rule meta data */
void *entry_ptr; /* handle to the table entry for rule meta data */
};
It then calls the ``table.ops.f_add`` API to add the rule to the ACL
table.
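For illustration, a hedged sketch of adding a UDP 5-tuple rule
(src 2.2.2.3/24, dst 2.2.2.7/24, ports 32/33) with a count action; the
values are examples and ``cls`` is the classifier created earlier:

.. code-block:: c

    #include <netinet/in.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>
    #include <rte_flow_classify.h>

    struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    struct rte_flow_item_ipv4 ipv4_spec = {
        .hdr = {
            .next_proto_id = IPPROTO_UDP,
            .src_addr = rte_cpu_to_be_32(RTE_IPV4(2, 2, 2, 3)),
            .dst_addr = rte_cpu_to_be_32(RTE_IPV4(2, 2, 2, 7)),
        },
    };
    struct rte_flow_item_ipv4 ipv4_mask = {
        .hdr = {
            .next_proto_id = 0xff,
            .src_addr = rte_cpu_to_be_32(0xffffff00), /* /24 */
            .dst_addr = rte_cpu_to_be_32(0xffffff00), /* /24 */
        },
    };
    struct rte_flow_item_udp udp_spec = {
        .hdr = {
            .src_port = rte_cpu_to_be_16(32),
            .dst_port = rte_cpu_to_be_16(33),
        },
    };
    struct rte_flow_item_udp udp_mask = {
        .hdr = { .src_port = 0xffff, .dst_port = 0xffff },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ipv4_spec, .mask = &ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_count count = { 0 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;
    int key_found;
    struct rte_flow_classify_rule *rule;

    rule = rte_flow_classify_table_entry_add(cls, &attr, pattern,
            actions, &key_found, &error);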
Deleting Flow Rules
~~~~~~~~~~~~~~~~~~~
The ``rte_flow_classify_table_entry_delete`` API calls the
``table.ops.f_delete`` API to delete a rule from the ACL table.
Packet Matching
~~~~~~~~~~~~~~~
The ``rte_flow_classifier_query`` API is used to find packets which match a
given flow rule in the table.
This API calls the internal function ``flow_classify_run``, which calls the
``table.ops.f_lookup`` API to see if any packets in a burst match any
of the Flow rules in the table.
The meta data for the highest priority rule matched for each packet is returned
in the entries array in the ``rte_flow_classify`` object.
The internal function ``action_apply`` implements the ``Count`` action which is
used to return data which matches a particular Flow rule.
The ``rte_flow_classifier_query`` API uses the following structures to return data
to the application.
.. code-block:: c
/** IPv4 5-tuple data */
struct rte_flow_classify_ipv4_5tuple {
uint32_t dst_ip; /**< Destination IP address in big endian. */
uint32_t dst_ip_mask; /**< Mask of destination IP address. */
uint32_t src_ip; /**< Source IP address in big endian. */
uint32_t src_ip_mask; /**< Mask of source IP address. */
uint16_t dst_port; /**< Destination port in big endian. */
uint16_t dst_port_mask; /**< Mask of destination port. */
uint16_t src_port; /**< Source Port in big endian. */
uint16_t src_port_mask; /**< Mask of source port. */
uint8_t proto; /**< L4 protocol. */
uint8_t proto_mask; /**< Mask of L4 protocol. */
};
/**
* Flow stats
*
* For the count action, stats can be returned by the query API.
*
* Storage for stats is provided by the application.
*
*/
struct rte_flow_classify_stats {
void *stats;
};
struct rte_flow_classify_5tuple_stats {
/** count of packets that match IPv4 5tuple pattern */
uint64_t counter1;
/** IPv4 5tuple data */
struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
};
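A hedged usage sketch, assuming ``bufs`` and ``nb_rx`` come from an
``rte_eth_rx_burst()`` call and ``rule`` from
``rte_flow_classify_table_entry_add()``:

.. code-block:: c

    #include <inttypes.h>
    #include <stdio.h>

    /* Sketch only: count the packets of a burst that match 'rule'. */
    struct rte_flow_classify_5tuple_stats ntuple_stats;
    struct rte_flow_classify_stats classify_stats = {
        .stats = (void *)&ntuple_stats,
    };

    if (rte_flow_classifier_query(cls, bufs, nb_rx,
            rule, &classify_stats) == 0)
        printf("rule matched %" PRIu64 " packets\n",
                ntuple_stats.counter1);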

@@ -1,423 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2010-2015 Intel Corporation.
.. _kni:
Kernel NIC Interface
====================
.. note::
KNI is deprecated and will be removed in the future.
See :doc:`../rel_notes/deprecation`.
:ref:`virtio_user_as_exception_path` alternative is the preferred way
for interfacing with the Linux network stack
as it is an in-kernel solution and has similar performance expectations.
.. note::
KNI is disabled by default in the DPDK build.
To re-enable the library, remove 'kni' from the "disable_libs" meson option when configuring a build.
The DPDK Kernel NIC Interface (KNI) allows userspace applications access to the Linux* control plane.
KNI provides an interface with the kernel network stack
and allows management of DPDK ports using standard Linux net tools
such as ``ethtool``, ``iproute2`` and ``tcpdump``.
The main use case of KNI is to send/receive exception packets to/from the Linux
network stack while the main datapath IO is done bypassing the networking stack.
There are other alternatives to KNI, all are available in the upstream Linux:
#. :ref:`virtio_user_as_exception_path`
#. :doc:`../nics/tap` as wrapper to `Linux tun/tap
<https://www.kernel.org/doc/Documentation/networking/tuntap.txt>`_
The benefits of using KNI over the alternatives are:
* Faster than existing Linux TUN/TAP interfaces
(by eliminating system calls and copy_to_user()/copy_from_user() operations).
The disadvantages of KNI are:
* It is an out-of-tree Linux kernel module,
which makes updating and distributing the driver more difficult.
Most users end up building the KNI driver from source,
which requires the packages and tools to build kernel modules.
* As it shares memory between userspace and kernelspace,
and the kernel part directly uses input provided by userspace, it is not safe.
This makes it hard to upstream the module.
* Requires dedicated kernel cores.
* Only a subset of net devices control commands are supported by KNI.
The components of an application using the DPDK Kernel NIC Interface are shown in :numref:`figure_kernel_nic_intf`.
.. _figure_kernel_nic_intf:
.. figure:: img/kernel_nic_intf.*
Components of a DPDK KNI Application
The DPDK KNI Kernel Module
--------------------------
The KNI kernel loadable module ``rte_kni`` provides the kernel interface
for DPDK applications.
When the ``rte_kni`` module is loaded, it will create a device ``/dev/kni``
that is used by the DPDK KNI API functions to control and communicate with
the kernel module.
The ``rte_kni`` kernel module contains several optional parameters which
can be specified when the module is loaded to control its behavior:
.. code-block:: console
# modinfo rte_kni.ko
<snip>
parm: lo_mode: KNI loopback mode (default=lo_mode_none):
lo_mode_none Kernel loopback disabled
lo_mode_fifo Enable kernel loopback with fifo
lo_mode_fifo_skb Enable kernel loopback with fifo and skb buffer
(charp)
parm: kthread_mode: Kernel thread mode (default=single):
single Single kernel thread mode enabled.
multiple Multiple kernel thread mode enabled.
(charp)
parm: carrier: Default carrier state for KNI interface (default=off):
off Interfaces will be created with carrier state set to off.
on Interfaces will be created with carrier state set to on.
(charp)
parm: enable_bifurcated: Enable request processing support for
bifurcated drivers, which means releasing rtnl_lock before calling
userspace callback and supporting async requests (default=off):
on Enable request processing support for bifurcated drivers.
(charp)
parm: min_scheduling_interval: KNI thread min scheduling interval (default=100 microseconds)
(long)
parm: max_scheduling_interval: KNI thread max scheduling interval (default=200 microseconds)
(long)
Loading the ``rte_kni`` kernel module without any optional parameters is
the typical way a DPDK application gets packets into and out of the kernel
network stack. Without any parameters, only one kernel thread is created
for all KNI devices for packet receiving on the kernel side, loopback mode is
disabled, and the default carrier state of KNI interfaces is set to *off*.
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko
.. _kni_loopback_mode:
Loopback Mode
~~~~~~~~~~~~~
For testing, the ``rte_kni`` kernel module can be loaded in loopback mode
by specifying the ``lo_mode`` parameter:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo
The ``lo_mode_fifo`` loopback option will loop back ring enqueue/dequeue
operations in kernel space.
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo_skb
The ``lo_mode_fifo_skb`` loopback option will loop back ring enqueue/dequeue
operations and sk buffer copies in kernel space.
If the ``lo_mode`` parameter is not specified, loopback mode is disabled.
.. _kni_kernel_thread_mode:
Kernel Thread Mode
~~~~~~~~~~~~~~~~~~
To provide flexibility of performance, the ``rte_kni`` KNI kernel module
can be loaded with the ``kthread_mode`` parameter. The ``rte_kni`` kernel
module supports two options: "single kernel thread" mode and "multiple
kernel thread" mode.
Single kernel thread mode is enabled as follows:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko kthread_mode=single
This mode will create only one kernel thread for all KNI interfaces to
receive data on the kernel side. By default, this kernel thread is not
bound to any particular core, but the user can set the core affinity for
this kernel thread by setting the ``core_id`` and ``force_bind`` parameters
in ``struct rte_kni_conf`` when the first KNI interface is created.
For optimum performance, the kernel thread should be bound to a core
on the same socket as the DPDK lcores used in the application.
The KNI kernel module can also be configured to start a separate kernel
thread for each KNI interface created by the DPDK application. Multiple
kernel thread mode is enabled as follows:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko kthread_mode=multiple
This mode will create a separate kernel thread for each KNI interface to
receive data on the kernel side. The core affinity of each ``kni_thread``
kernel thread can be specified by setting the ``core_id`` and ``force_bind``
parameters in ``struct rte_kni_conf`` when each KNI interface is created.
Multiple kernel thread mode can provide scalable higher performance if
sufficient unused cores are available on the host system.
If the ``kthread_mode`` parameter is not specified, the "single kernel
thread" mode is used.
.. _kni_default_carrier_state:
Default Carrier State
~~~~~~~~~~~~~~~~~~~~~
The default carrier state of KNI interfaces created by the ``rte_kni``
kernel module is controlled via the ``carrier`` option when the module
is loaded.
If ``carrier=off`` is specified, the kernel module will leave the carrier
state of the interface *down* when the interface is management enabled.
The DPDK application can set the carrier state of the KNI interface using the
``rte_kni_update_link()`` function. This is useful for DPDK applications
which require that the carrier state of the KNI interface reflect the
actual link state of the corresponding physical NIC port.
If ``carrier=on`` is specified, the kernel module will automatically set
the carrier state of the interface to *up* when the interface is management
enabled. This is useful for DPDK applications which use the KNI interface as
a purely virtual interface that does not correspond to any physical hardware
and do not wish to explicitly set the carrier state of the interface with
``rte_kni_update_link()``. It is also useful for testing in loopback mode
where the NIC port may not be physically connected to anything.
To set the default carrier state to *on*:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko carrier=on
To set the default carrier state to *off*:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko carrier=off
If the ``carrier`` parameter is not specified, the default carrier state
of KNI interfaces will be set to *off*.
.. _kni_bifurcated_device_support:
Bifurcated Device Support
~~~~~~~~~~~~~~~~~~~~~~~~~
User callbacks are executed while the kernel module holds the ``rtnl`` lock; this
causes a deadlock when callbacks run control commands on another Linux kernel
network interface.
Bifurcated devices have a kernel network driver part, and to prevent deadlock
for them the ``enable_bifurcated`` parameter is used.
To enable bifurcated device support:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko enable_bifurcated=on
Enabling bifurcated device support releases the ``rtnl`` lock before calling the
callback and locks it back after the callback returns. It also enables asynchronous
requests to support callbacks that require the rtnl lock to work (e.g. interface down).
KNI Kthread Scheduling
~~~~~~~~~~~~~~~~~~~~~~
The ``min_scheduling_interval`` and ``max_scheduling_interval`` parameters
control the rescheduling interval of the KNI kthreads.
This might be useful for use cases that require improved latency or
performance for control plane traffic.
The implementation is backed by Linux High Precision Timers, and uses ``usleep_range``.
Hence, it will have the same granularity constraints as this Linux subsystem.
For Linux High Precision Timers, you can check the following resource: `Kernel Timers <http://www.kernel.org/doc/Documentation/timers/timers-howto.txt>`_
To set the ``min_scheduling_interval`` to a value of 100 microseconds:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko min_scheduling_interval=100
To set the ``max_scheduling_interval`` to a value of 200 microseconds:
.. code-block:: console
# insmod <build_dir>/kernel/linux/kni/rte_kni.ko max_scheduling_interval=200
If the ``min_scheduling_interval`` and ``max_scheduling_interval`` parameters are
not specified, the default interval limits will be set to *100* and *200* respectively.
KNI Creation and Deletion
-------------------------
Before any KNI interfaces can be created, the ``rte_kni`` kernel module must
be loaded into the kernel and configured with the ``rte_kni_init()`` function.
The KNI interfaces are created by a DPDK application dynamically via the
``rte_kni_alloc()`` function.
The ``struct rte_kni_conf`` structure contains fields which allow the
user to specify the interface name, set the MTU size, set an explicit or
random MAC address and control the affinity of the kernel Rx thread(s)
(both single and multi-threaded modes).
By default the KNI sample application gets the MTU from the matching device,
and in the case of the KNI PMD it is derived from the mbuf buffer length.
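A minimal allocation sketch (field names per ``rte_kni.h``; ``pktmbuf_pool``
is an existing mempool and error handling is abbreviated):

.. code-block:: c

    #include <stdio.h>
    #include <string.h>
    #include <rte_kni.h>

    /* Sketch: one KNI interface with an unbound kernel Rx thread. */
    struct rte_kni_conf conf;
    struct rte_kni *kni;

    memset(&conf, 0, sizeof(conf));
    snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth0");
    conf.core_id = 0;      /* candidate core for the kernel Rx thread */
    conf.force_bind = 0;   /* do not pin the kernel thread to core_id */
    conf.mbuf_size = 2048; /* per-mbuf buffer size */

    kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
    if (kni == NULL)
        rte_exit(EXIT_FAILURE, "Failed to create KNI interface\n");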
The ``struct rte_kni_ops`` structure contains pointers to functions to
handle requests from the ``rte_kni`` kernel module. These functions
allow DPDK applications to perform actions when the KNI interfaces are
manipulated by control commands or functions external to the application.
For example, the DPDK application may wish to enable/disable a physical
NIC port when a user enables/disables a KNI interface with ``ip link set
[up|down] dev <ifaceX>``. The DPDK application can register a callback for
``config_network_if`` which will be called when the interface management
state changes.
There are currently four callbacks for which the user can register
application functions:
``config_network_if``:
Called when the management state of the KNI interface changes.
For example, when the user runs ``ip link set [up|down] dev <ifaceX>``.
``change_mtu``:
Called when the user changes the MTU size of the KNI
interface. For example, when the user runs ``ip link set mtu <size>
dev <ifaceX>``.
``config_mac_address``:
Called when the user changes the MAC address of the KNI interface.
For example, when the user runs ``ip link set address <MAC>
dev <ifaceX>``. If the user sets this callback function to NULL,
but sets the ``port_id`` field to a value other than -1, a default
callback handler in the rte_kni library ``kni_config_mac_address()``
will be called which calls ``rte_eth_dev_default_mac_addr_set()``
on the specified ``port_id``.
``config_promiscusity``:
Called when the user changes the promiscuity state of the KNI
interface. For example, when the user runs ``ip link set promisc
[on|off] dev <ifaceX>``. If the user sets this callback function to
NULL, but sets the ``port_id`` field to a value other than -1, a default
callback handler in the rte_kni library ``kni_config_promiscusity()``
will be called which calls ``rte_eth_promiscuous_<enable|disable>()``
on the specified ``port_id``.
``config_allmulticast``:
Called when the user changes the allmulticast state of the KNI interface.
For example, when the user runs ``ifconfig <ifaceX> [-]allmulti``. If the
user sets this callback function to NULL, but sets the ``port_id`` field to
a value other than -1, a default callback handler in the rte_kni library
``kni_config_allmulticast()`` will be called which calls
``rte_eth_allmulticast_<enable|disable>()`` on the specified ``port_id``.
In order to run these callbacks, the application must periodically call
the ``rte_kni_handle_request()`` function. Any user callback function
registered will be called directly from ``rte_kni_handle_request()`` so
care must be taken to prevent deadlock and to not block any DPDK fastpath
tasks. Typically DPDK applications which use these callbacks will need
to create a separate thread or secondary process to periodically call
``rte_kni_handle_request()``.
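A hedged sketch of wiring this up; ``kni_config_network_interface`` and
``force_quit`` are hypothetical application symbols:

.. code-block:: c

    #include <rte_ethdev.h>
    #include <rte_kni.h>

    /* Hypothetical callback: mirror KNI up/down onto a physical port. */
    static int
    kni_config_network_interface(uint16_t port_id, uint8_t if_up)
    {
        int ret = 0;

        if (if_up)
            ret = rte_eth_dev_start(port_id);
        else
            rte_eth_dev_stop(port_id);
        return ret;
    }

    /* At KNI creation time: */
    struct rte_kni_ops ops = {
        .port_id = port_id,
        .config_network_if = kni_config_network_interface,
    };
    struct rte_kni *kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);

    /* In a control thread or slow loop, never on the fast path: */
    while (!force_quit)
        rte_kni_handle_request(kni);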
The KNI interfaces can be deleted by a DPDK application with
``rte_kni_release()``. All KNI interfaces not explicitly deleted will be
deleted when the ``/dev/kni`` device is closed, either explicitly with
``rte_kni_close()`` or when the DPDK application is closed.
DPDK mbuf Flow
--------------
To minimize the amount of DPDK code running in kernel space, the mbuf mempool is managed in userspace only.
The kernel module will be aware of mbufs,
but all mbuf allocation and free operations will be handled by the DPDK application only.
:numref:`figure_pkt_flow_kni` shows a typical scenario with packets sent in both directions.
.. _figure_pkt_flow_kni:
.. figure:: img/pkt_flow_kni.*
Packet Flow via mbufs in the DPDK KNI
Use Case: Ingress
-----------------
On the DPDK RX side, the mbuf is allocated by the PMD in the RX thread context.
This thread will enqueue the mbuf in the rx_q FIFO,
and the next pointers in the mbuf chain are converted to physical addresses.
The KNI thread will poll the rx_q of all active KNI devices.
If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx().
The dequeued mbuf must be freed, so the same pointer is sent back in the free_q FIFO,
and the next pointers, if any, must be converted back to virtual addresses before the mbuf is put in the free_q FIFO.
The RX thread, in the same main loop, polls this FIFO and frees the mbuf after dequeuing it.
The address conversion of the next pointer prevents chained mbufs
spanning different hugepage segments from causing a kernel crash.
Use Case: Egress
----------------
For packet egress the DPDK application must first enqueue several mbufs to create an mbuf cache on the kernel side.
The packet is received from the Linux net stack by calling the kni_net_tx() callback.
The mbuf is dequeued (without waiting, thanks to the cache) and filled with data from the sk_buff.
The sk_buff is then freed and the mbuf sent in the tx_q FIFO.
The DPDK TX thread dequeues the mbuf and sends it to the PMD via ``rte_eth_tx_burst()``.
It then puts the mbuf back in the cache.
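From the application side, the two use cases reduce to a shuttle loop; a
hedged sketch, where ``PKT_BURST_SZ``, ``port_id`` and ``kni`` are
application symbols:

.. code-block:: c

    #include <rte_ethdev.h>
    #include <rte_kni.h>
    #include <rte_mbuf.h>

    struct rte_mbuf *pkts[PKT_BURST_SZ];
    unsigned int nb, sent;

    /* Ingress: NIC -> kernel network stack (rx_q FIFO). */
    nb = rte_eth_rx_burst(port_id, 0, pkts, PKT_BURST_SZ);
    sent = rte_kni_tx_burst(kni, pkts, nb);
    while (sent < nb)
        rte_pktmbuf_free(pkts[sent++]); /* drop what the FIFO refused */

    /* Egress: kernel network stack (tx_q FIFO) -> NIC. */
    nb = rte_kni_rx_burst(kni, pkts, PKT_BURST_SZ);
    sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
    while (sent < nb)
        rte_pktmbuf_free(pkts[sent++]);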
IOVA = VA: Support
------------------
KNI operates in the IOVA_VA scheme when
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0), and
- the EAL option ``--iova-mode=va`` is passed, or the bus IOVA scheme in DPDK is
selected as RTE_IOVA_VA.
Due to the IOVA to KVA address translations, there can be a performance impact
depending on the KNI use case. As mitigation, forcing IOVA to PA via the EAL
``--iova-mode=pa`` option can be used; the IOVA_DC bus IOMMU scheme can also
result in IOVA as PA.
Ethtool
-------
Ethtool is a Linux-specific tool with corresponding support in the kernel.
The current version of KNI provides minimal ethtool functionality,
including querying version and link state. It does not support link
control, statistics, or dumping device registers.

@@ -1,242 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2017 Intel Corporation.
Flow Classify Sample Application
================================
The Flow Classify sample application is based on the simple *skeleton* example
of a forwarding application.
It is intended as a demonstration of the basic components of a DPDK forwarding
application which uses the Flow Classify library APIs.
Please refer to the
:doc:`../prog_guide/flow_classify_lib`
for more information.
Compiling the Application
-------------------------
To compile the sample application see :doc:`compiling`.
The application is located in the ``flow_classify`` sub-directory.
Running the Application
-----------------------
To run the example in a ``linux`` environment:
.. code-block:: console
./<build_dir>/examples/dpdk-flow_classify -c 4 -n 4 -- \
--rule_ipv4="../ipv4_rules_file.txt"
Please refer to the *DPDK Getting Started Guide*, section
:doc:`../linux_gsg/build_sample_apps`
for general information on running applications and the Environment Abstraction
Layer (EAL) options.
Sample ipv4_rules_file.txt
--------------------------
.. code-block:: console
#file format:
#src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
#
2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
Explanation
-----------
The following sections provide an explanation of the main components of the
code.
All DPDK library functions used in the sample code are prefixed with ``rte_``
and are explained in detail in the *DPDK API Documentation*.
ACL field definitions for the IPv4 5 tuple rule
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following field definitions are used when creating the ACL table during
initialisation of the ``Flow Classify`` application:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Creation of ACL table during initialization of application. 8<
:end-before: >8 End of creation of ACL table.
The Main Function
~~~~~~~~~~~~~~~~~
The ``main()`` function performs the initialization and calls the execution
threads for each lcore.
The first task is to initialize the Environment Abstraction Layer (EAL).
The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Initialize the Environment Abstraction Layer (EAL). 8<
:end-before: >8 End of initialization of EAL.
:dedent: 1
It then parses the flow_classify application arguments:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Parse application arguments (after the EAL ones). 8<
:end-before: >8 End of parse application arguments.
:dedent: 1
The ``main()`` function also allocates a mempool to hold the mbufs
(Message Buffers) used by the application:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Creates a new mempool in memory to hold the mbufs. 8<
:end-before: >8 End of creation of new mempool in memory.
:dedent: 1
mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
The ``main()`` function also initializes all the ports using the user defined
``port_init()`` function which is explained in the next section:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Initialize all ports. 8<
:end-before: >8 End of initialization of all ports.
:dedent: 1
The ``main()`` function creates the ``flow classifier object`` and adds an ``ACL
table`` to the flow classifier.
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Creation of flow classifier object. 8<
:end-before: >8 End of creation of flow classifier object.
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Memory allocation. 8<
:end-before: >8 End of initialization of table create params.
:dedent: 1
It then reads the ipv4_rules_file.txt file and initialises the parameters for
the ``rte_flow_classify_table_entry_add`` API.
This API adds a rule to the ACL table.
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Read file of IPv4 tuple rules. 8<
:end-before: >8 End of reading file of IPv4 5 tuple rules.
:dedent: 1
Once the initialization is complete, the application is ready to launch a
function on an lcore. In this example ``lcore_main()`` is called on a single
lcore.
.. code-block:: c
lcore_main(cls_app);
The ``lcore_main()`` function is explained below.
The Port Initialization Function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The main functional part of the port initialization used in the Basic
Forwarding application is shown below:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Initializing port using global settings. 8<
:end-before: >8 End of initializing a given port.
The Ethernet ports are configured with default settings using the
``rte_eth_dev_configure()`` function.
For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
The Ethernet port is then started:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Start the Ethernet port. 8<
:end-before: >8 End of starting the Ethernet port.
:dedent: 1
Finally the RX port is set in promiscuous mode:
.. code-block:: c
retval = rte_eth_promiscuous_enable(port);
The Add Rules function
~~~~~~~~~~~~~~~~~~~~~~
The ``add_rules`` function reads the ``ipv4_rules_file.txt`` file and calls the
``add_classify_rule`` function which calls the
``rte_flow_classify_table_entry_add`` API.
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Reads file and calls the add_classify_rule function. 8<
:end-before: >8 End of add_rules.
The Lcore Main function
~~~~~~~~~~~~~~~~~~~~~~~
As we saw above the ``main()`` function calls an application function on the
available lcores.
The ``lcore_main`` function calls the ``rte_flow_classifier_query`` API.
For the Basic Forwarding application the ``lcore_main`` function looks like the
following:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Flow classify data. 8<
:end-before: >8 End of flow classify data.
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Classifying the packets. 8<
:end-before: >8 End of lcore main.
The main work of the application is done within the loop:
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
:language: c
:start-after: Run until the application is quit or killed. 8<
:end-before: >8 End of main loop.
:dedent: 1
Packets are received in bursts on the RX ports and transmitted in bursts on
the TX ports. The ports are grouped in pairs with a simple mapping scheme
using an XOR on the port number::
0 -> 1
1 -> 0
2 -> 3
3 -> 2
etc.
The ``rte_eth_tx_burst()`` function frees the memory buffers of packets that
are transmitted. If packets fail to transmit, ``(nb_tx < nb_rx)``, then they
must be freed explicitly using ``rte_pktmbuf_free()``.
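A minimal sketch of that explicit free (``bufs``, ``nb_rx`` and ``nb_tx`` as
in the loop above):

.. code-block:: c

    /* Free any packets the TX burst could not send. */
    if (unlikely(nb_tx < nb_rx)) {
        uint16_t buf;

        for (buf = nb_tx; buf < nb_rx; buf++)
            rte_pktmbuf_free(bufs[buf]);
    }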
The forwarding loop can be interrupted and the application closed using
``Ctrl-C``.

@@ -1,32 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#ifndef _ACC200_CFG_H_
#define _ACC200_CFG_H_
/**
* @file acc200_cfg.h
*
* Functions for configuring ACC200 HW.
* Configuration related to encoding/decoding is done through the
* librte_bbdev library.
*/
/**
* Configure an ACC200 device.
*
* @param dev_name
* The name of the device. This is the short form of PCI BDF, e.g. 00:01.0.
* It can also be retrieved for a bbdev device from the dev_name field in the
* rte_bbdev_info structure returned by rte_bbdev_info_get().
* @param conf
* Configuration to apply to ACC200 HW.
*
* @return
* Zero on success, negative value on failure.
*/
int
acc200_configure(const char *dev_name, struct rte_acc_conf *conf);
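/*
 * Illustrative usage sketch only; not part of this header's API.
 * The rte_acc_conf field shown and the BDF string are placeholder
 * assumptions:
 *
 *     struct rte_acc_conf conf = { .pf_mode_en = 1 };
 *
 *     if (acc200_configure("00:01.0", &conf) != 0)
 *             rte_panic("ACC200 configuration failed\n");
 */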
#endif /* _ACC200_CFG_H_ */

@@ -1,108 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2021 Intel Corporation
*/
#ifndef ACC200_PF_ENUM_H
#define ACC200_PF_ENUM_H
/*
* ACC200 Register mapping on PF BAR0
* This is automatically generated from RDL, format may change with new RDL
* Release.
* Variable names are as is
*/
enum {
HWPfQmgrEgressQueuesTemplate = 0x0007FC00,
HWPfQmgrIngressAq = 0x00080000,
HWPfQmgrDepthLog2Grp = 0x00A00200,
HWPfQmgrTholdGrp = 0x00A00300,
HWPfQmgrGrpTmplateReg0Indx = 0x00A00600,
HWPfQmgrGrpTmplateReg1Indx = 0x00A00700,
HWPfQmgrGrpTmplateReg2indx = 0x00A00800,
HWPfQmgrGrpTmplateReg3Indx = 0x00A00900,
HWPfQmgrGrpTmplateReg4Indx = 0x00A00A00,
HWPfQmgrVfBaseAddr = 0x00A01000,
HWPfQmgrArbQDepthGrp = 0x00A02F00,
HWPfQmgrGrpFunction0 = 0x00A02F40,
HWPfQmgrGrpFunction1 = 0x00A02F44,
HWPfQmgrGrpPriority = 0x00A02F48,
HWPfQmgrAqEnableVf = 0x00A10000,
HWPfQmgrRingSizeVf = 0x00A20004,
HWPfQmgrGrpDepthLog20Vf = 0x00A20008,
HWPfQmgrGrpDepthLog21Vf = 0x00A2000C,
HWPfFabricM2iBufferReg = 0x00B30000,
HWPfFabricI2Mdma_weight = 0x00B31044,
HwPfFecUl5gIbDebugReg = 0x00B40200,
HWPfFftConfig0 = 0x00B58004,
HWPfFftRamPageAccess = 0x00B5800C,
HWPfFftRamOff = 0x00B58800,
HWPfDmaConfig0Reg = 0x00B80000,
HWPfDmaConfig1Reg = 0x00B80004,
HWPfDmaQmgrAddrReg = 0x00B80008,
HWPfDmaAxcacheReg = 0x00B80010,
HWPfDmaAxiControl = 0x00B8002C,
HWPfDmaQmanen = 0x00B80040,
HWPfDma4gdlIbThld = 0x00B800CC,
HWPfDmaCfgRrespBresp = 0x00B80814,
HWPfDmaDescriptorSignatuture = 0x00B80868,
HWPfDmaErrorDetectionEn = 0x00B80870,
HWPfDmaFec5GulDescBaseLoRegVf = 0x00B88020,
HWPfDmaFec5GulDescBaseHiRegVf = 0x00B88024,
HWPfDmaFec5GulRespPtrLoRegVf = 0x00B88028,
HWPfDmaFec5GulRespPtrHiRegVf = 0x00B8802C,
HWPfDmaFec5GdlDescBaseLoRegVf = 0x00B88040,
HWPfDmaFec5GdlDescBaseHiRegVf = 0x00B88044,
HWPfDmaFec5GdlRespPtrLoRegVf = 0x00B88048,
HWPfDmaFec5GdlRespPtrHiRegVf = 0x00B8804C,
HWPfDmaFec4GulDescBaseLoRegVf = 0x00B88060,
HWPfDmaFec4GulDescBaseHiRegVf = 0x00B88064,
HWPfDmaFec4GulRespPtrLoRegVf = 0x00B88068,
HWPfDmaFec4GulRespPtrHiRegVf = 0x00B8806C,
HWPfDmaFec4GdlDescBaseLoRegVf = 0x00B88080,
HWPfDmaFec4GdlDescBaseHiRegVf = 0x00B88084,
HWPfDmaFec4GdlRespPtrLoRegVf = 0x00B88088,
HWPfDmaFec4GdlRespPtrHiRegVf = 0x00B8808C,
HWPDmaFftDescBaseLoRegVf = 0x00B880A0,
HWPDmaFftDescBaseHiRegVf = 0x00B880A4,
HWPDmaFftRespPtrLoRegVf = 0x00B880A8,
HWPDmaFftRespPtrHiRegVf = 0x00B880AC,
HWPfQosmonAEvalOverflow0 = 0x00B90008,
HWPfPermonACntrlRegVf = 0x00B98000,
HWPfQosmonBEvalOverflow0 = 0x00BA0008,
HWPfPermonBCntrlRegVf = 0x00BA8000,
HWPfPermonCCntrlRegVf = 0x00BB8000,
HWPfHiInfoRingBaseLoRegPf = 0x00C84014,
HWPfHiInfoRingBaseHiRegPf = 0x00C84018,
HWPfHiInfoRingPointerRegPf = 0x00C8401C,
HWPfHiInfoRingIntWrEnRegPf = 0x00C84020,
HWPfHiBlockTransmitOnErrorEn = 0x00C84038,
HWPfHiCfgMsiIntWrEnRegPf = 0x00C84040,
HWPfHiMsixVectorMapperPf = 0x00C84060,
HWPfHiPfMode = 0x00C84108,
HWPfHiClkGateHystReg = 0x00C8410C,
HWPfHiMsiDropEnableReg = 0x00C84114,
HWPfHiSectionPowerGatingReq = 0x00C84128,
HWPfHiSectionPowerGatingAck = 0x00C8412C,
};
/* TIP PF Interrupt numbers */
enum {
ACC200_PF_INT_QMGR_AQ_OVERFLOW = 0,
ACC200_PF_INT_DOORBELL_VF_2_PF = 1,
ACC200_PF_INT_ILLEGAL_FORMAT = 2,
ACC200_PF_INT_QMGR_DISABLED_ACCESS = 3,
ACC200_PF_INT_QMGR_AQ_OVERTHRESHOLD = 4,
ACC200_PF_INT_DMA_DL_DESC_IRQ = 5,
ACC200_PF_INT_DMA_UL_DESC_IRQ = 6,
ACC200_PF_INT_DMA_FFT_DESC_IRQ = 7,
ACC200_PF_INT_DMA_UL5G_DESC_IRQ = 8,
ACC200_PF_INT_DMA_DL5G_DESC_IRQ = 9,
ACC200_PF_INT_DMA_MLD_DESC_IRQ = 10,
ACC200_PF_INT_ARAM_ECC_1BIT_ERR = 11,
ACC200_PF_INT_PARITY_ERR = 12,
ACC200_PF_INT_QMGR_ERR = 13,
ACC200_PF_INT_INT_REQ_OVERFLOW = 14,
ACC200_PF_INT_APB_TIMEOUT = 15,
};
#endif /* ACC200_PF_ENUM_H */

@@ -1,196 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#ifndef _RTE_ACC200_PMD_H_
#define _RTE_ACC200_PMD_H_
#include "acc_common.h"
#include "acc200_pf_enum.h"
#include "acc200_vf_enum.h"
#include "acc200_cfg.h"
/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
rte_log(RTE_LOG_ ## level, acc200_logtype, fmt "\n", \
##__VA_ARGS__)
#ifdef RTE_LIBRTE_BBDEV_DEBUG
#define rte_bbdev_log_debug(fmt, ...) \
rte_bbdev_log(DEBUG, "acc200_pmd: " fmt, \
##__VA_ARGS__)
#else
#define rte_bbdev_log_debug(fmt, ...)
#endif
/* ACC200 PF and VF driver names */
#define ACC200PF_DRIVER_NAME intel_acc200_pf
#define ACC200VF_DRIVER_NAME intel_acc200_vf
/* ACC200 PCI vendor & device IDs */
#define RTE_ACC200_VENDOR_ID (0x8086)
#define RTE_ACC200_PF_DEVICE_ID (0x57C0)
#define RTE_ACC200_VF_DEVICE_ID (0x57C1)
#define ACC200_MAX_PF_MSIX (256+32)
#define ACC200_MAX_VF_MSIX (256+7)
/* Values used in writing to the registers */
#define ACC200_REG_IRQ_EN_ALL 0x1FF83FF /* Enable all interrupts */
/* Number of Virtual Functions ACC200 supports */
#define ACC200_NUM_VFS 16
#define ACC200_NUM_QGRPS 16
#define ACC200_NUM_AQS 16
#define ACC200_GRP_ID_SHIFT 10 /* Queue Index Hierarchy */
#define ACC200_VF_ID_SHIFT 4 /* Queue Index Hierarchy */
#define ACC200_WORDS_IN_ARAM_SIZE (256 * 1024 / 4)
/* Mapping of signals for the available engines */
#define ACC200_SIG_UL_5G 0
#define ACC200_SIG_UL_5G_LAST 4
#define ACC200_SIG_DL_5G 10
#define ACC200_SIG_DL_5G_LAST 11
#define ACC200_SIG_UL_4G 12
#define ACC200_SIG_UL_4G_LAST 16
#define ACC200_SIG_DL_4G 21
#define ACC200_SIG_DL_4G_LAST 23
#define ACC200_SIG_FFT 24
#define ACC200_SIG_FFT_LAST 24
#define ACC200_NUM_ACCS 5
/* ACC200 Configuration */
#define ACC200_FABRIC_MODE 0x8000103
#define ACC200_CFG_DMA_ERROR 0x3DF
#define ACC200_CFG_AXI_CACHE 0x11
#define ACC200_CFG_QMGR_HI_P 0x0F0F
#define ACC200_RESET_HARD 0x1FF
#define ACC200_ENGINES_MAX 9
#define ACC200_GPEX_AXIMAP_NUM 17
#define ACC200_CLOCK_GATING_EN 0x30000
#define ACC200_FFT_CFG_0 0x2001
#define ACC200_FFT_RAM_EN 0x80008000
#define ACC200_FFT_RAM_DIS 0x0
#define ACC200_FFT_RAM_SIZE 512
#define ACC200_CLK_EN 0x00010A01
#define ACC200_CLK_DIS 0x01F10A01
#define ACC200_PG_MASK_0 0x1F
#define ACC200_PG_MASK_1 0xF
#define ACC200_PG_MASK_2 0x1
#define ACC200_PG_MASK_3 0x0
#define ACC200_PG_MASK_FFT 1
#define ACC200_PG_MASK_4GUL 4
#define ACC200_PG_MASK_5GUL 8
#define ACC200_STATUS_WAIT 10
#define ACC200_STATUS_TO 100
struct acc200_registry_addr {
unsigned int dma_ring_dl5g_hi;
unsigned int dma_ring_dl5g_lo;
unsigned int dma_ring_ul5g_hi;
unsigned int dma_ring_ul5g_lo;
unsigned int dma_ring_dl4g_hi;
unsigned int dma_ring_dl4g_lo;
unsigned int dma_ring_ul4g_hi;
unsigned int dma_ring_ul4g_lo;
unsigned int dma_ring_fft_hi;
unsigned int dma_ring_fft_lo;
unsigned int ring_size;
unsigned int info_ring_hi;
unsigned int info_ring_lo;
unsigned int info_ring_en;
unsigned int info_ring_ptr;
unsigned int tail_ptrs_dl5g_hi;
unsigned int tail_ptrs_dl5g_lo;
unsigned int tail_ptrs_ul5g_hi;
unsigned int tail_ptrs_ul5g_lo;
unsigned int tail_ptrs_dl4g_hi;
unsigned int tail_ptrs_dl4g_lo;
unsigned int tail_ptrs_ul4g_hi;
unsigned int tail_ptrs_ul4g_lo;
unsigned int tail_ptrs_fft_hi;
unsigned int tail_ptrs_fft_lo;
unsigned int depth_log0_offset;
unsigned int depth_log1_offset;
unsigned int qman_group_func;
unsigned int hi_mode;
unsigned int pmon_ctrl_a;
unsigned int pmon_ctrl_b;
unsigned int pmon_ctrl_c;
};
/* Structure holding registry addresses for PF */
static const struct acc200_registry_addr pf_reg_addr = {
.dma_ring_dl5g_hi = HWPfDmaFec5GdlDescBaseHiRegVf,
.dma_ring_dl5g_lo = HWPfDmaFec5GdlDescBaseLoRegVf,
.dma_ring_ul5g_hi = HWPfDmaFec5GulDescBaseHiRegVf,
.dma_ring_ul5g_lo = HWPfDmaFec5GulDescBaseLoRegVf,
.dma_ring_dl4g_hi = HWPfDmaFec4GdlDescBaseHiRegVf,
.dma_ring_dl4g_lo = HWPfDmaFec4GdlDescBaseLoRegVf,
.dma_ring_ul4g_hi = HWPfDmaFec4GulDescBaseHiRegVf,
.dma_ring_ul4g_lo = HWPfDmaFec4GulDescBaseLoRegVf,
.dma_ring_fft_hi = HWPDmaFftDescBaseHiRegVf,
.dma_ring_fft_lo = HWPDmaFftDescBaseLoRegVf,
.ring_size = HWPfQmgrRingSizeVf,
.info_ring_hi = HWPfHiInfoRingBaseHiRegPf,
.info_ring_lo = HWPfHiInfoRingBaseLoRegPf,
.info_ring_en = HWPfHiInfoRingIntWrEnRegPf,
.info_ring_ptr = HWPfHiInfoRingPointerRegPf,
.tail_ptrs_dl5g_hi = HWPfDmaFec5GdlRespPtrHiRegVf,
.tail_ptrs_dl5g_lo = HWPfDmaFec5GdlRespPtrLoRegVf,
.tail_ptrs_ul5g_hi = HWPfDmaFec5GulRespPtrHiRegVf,
.tail_ptrs_ul5g_lo = HWPfDmaFec5GulRespPtrLoRegVf,
.tail_ptrs_dl4g_hi = HWPfDmaFec4GdlRespPtrHiRegVf,
.tail_ptrs_dl4g_lo = HWPfDmaFec4GdlRespPtrLoRegVf,
.tail_ptrs_ul4g_hi = HWPfDmaFec4GulRespPtrHiRegVf,
.tail_ptrs_ul4g_lo = HWPfDmaFec4GulRespPtrLoRegVf,
.tail_ptrs_fft_hi = HWPDmaFftRespPtrHiRegVf,
.tail_ptrs_fft_lo = HWPDmaFftRespPtrLoRegVf,
.depth_log0_offset = HWPfQmgrGrpDepthLog20Vf,
.depth_log1_offset = HWPfQmgrGrpDepthLog21Vf,
.qman_group_func = HWPfQmgrGrpFunction0,
.hi_mode = HWPfHiMsixVectorMapperPf,
.pmon_ctrl_a = HWPfPermonACntrlRegVf,
.pmon_ctrl_b = HWPfPermonBCntrlRegVf,
.pmon_ctrl_c = HWPfPermonCCntrlRegVf,
};
/* Structure holding registry addresses for VF */
static const struct acc200_registry_addr vf_reg_addr = {
.dma_ring_dl5g_hi = HWVfDmaFec5GdlDescBaseHiRegVf,
.dma_ring_dl5g_lo = HWVfDmaFec5GdlDescBaseLoRegVf,
.dma_ring_ul5g_hi = HWVfDmaFec5GulDescBaseHiRegVf,
.dma_ring_ul5g_lo = HWVfDmaFec5GulDescBaseLoRegVf,
.dma_ring_dl4g_hi = HWVfDmaFec4GdlDescBaseHiRegVf,
.dma_ring_dl4g_lo = HWVfDmaFec4GdlDescBaseLoRegVf,
.dma_ring_ul4g_hi = HWVfDmaFec4GulDescBaseHiRegVf,
.dma_ring_ul4g_lo = HWVfDmaFec4GulDescBaseLoRegVf,
.dma_ring_fft_hi = HWVfDmaFftDescBaseHiRegVf,
.dma_ring_fft_lo = HWVfDmaFftDescBaseLoRegVf,
.ring_size = HWVfQmgrRingSizeVf,
.info_ring_hi = HWVfHiInfoRingBaseHiVf,
.info_ring_lo = HWVfHiInfoRingBaseLoVf,
.info_ring_en = HWVfHiInfoRingIntWrEnVf,
.info_ring_ptr = HWVfHiInfoRingPointerVf,
.tail_ptrs_dl5g_hi = HWVfDmaFec5GdlRespPtrHiRegVf,
.tail_ptrs_dl5g_lo = HWVfDmaFec5GdlRespPtrLoRegVf,
.tail_ptrs_ul5g_hi = HWVfDmaFec5GulRespPtrHiRegVf,
.tail_ptrs_ul5g_lo = HWVfDmaFec5GulRespPtrLoRegVf,
.tail_ptrs_dl4g_hi = HWVfDmaFec4GdlRespPtrHiRegVf,
.tail_ptrs_dl4g_lo = HWVfDmaFec4GdlRespPtrLoRegVf,
.tail_ptrs_ul4g_hi = HWVfDmaFec4GulRespPtrHiRegVf,
.tail_ptrs_ul4g_lo = HWVfDmaFec4GulRespPtrLoRegVf,
.tail_ptrs_fft_hi = HWVfDmaFftRespPtrHiRegVf,
.tail_ptrs_fft_lo = HWVfDmaFftRespPtrLoRegVf,
.depth_log0_offset = HWVfQmgrGrpDepthLog20Vf,
.depth_log1_offset = HWVfQmgrGrpDepthLog21Vf,
.qman_group_func = HWVfQmgrGrpFunction0Vf,
.hi_mode = HWVfHiMsixVectorMapperVf,
.pmon_ctrl_a = HWVfPmACntrlRegVf,
.pmon_ctrl_b = HWVfPmBCntrlRegVf,
.pmon_ctrl_c = HWVfPmCCntrlRegVf,
};
#endif /* _RTE_ACC200_PMD_H_ */

@@ -1,83 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2021 Intel Corporation
*/
#ifndef ACC200_VF_ENUM_H
#define ACC200_VF_ENUM_H
/*
* ACC200 Register mapping on VF BAR0
* This is automatically generated from RDL, format may change with new RDL
*/
enum {
HWVfQmgrIngressAq = 0x00000000,
HWVfHiVfToPfDbellVf = 0x00000800,
HWVfHiPfToVfDbellVf = 0x00000808,
HWVfHiInfoRingBaseLoVf = 0x00000810,
HWVfHiInfoRingBaseHiVf = 0x00000814,
HWVfHiInfoRingPointerVf = 0x00000818,
HWVfHiInfoRingIntWrEnVf = 0x00000820,
HWVfHiInfoRingPf2VfWrEnVf = 0x00000824,
HWVfHiMsixVectorMapperVf = 0x00000860,
HWVfDmaFec5GulDescBaseLoRegVf = 0x00000920,
HWVfDmaFec5GulDescBaseHiRegVf = 0x00000924,
HWVfDmaFec5GulRespPtrLoRegVf = 0x00000928,
HWVfDmaFec5GulRespPtrHiRegVf = 0x0000092C,
HWVfDmaFec5GdlDescBaseLoRegVf = 0x00000940,
HWVfDmaFec5GdlDescBaseHiRegVf = 0x00000944,
HWVfDmaFec5GdlRespPtrLoRegVf = 0x00000948,
HWVfDmaFec5GdlRespPtrHiRegVf = 0x0000094C,
HWVfDmaFec4GulDescBaseLoRegVf = 0x00000960,
HWVfDmaFec4GulDescBaseHiRegVf = 0x00000964,
HWVfDmaFec4GulRespPtrLoRegVf = 0x00000968,
HWVfDmaFec4GulRespPtrHiRegVf = 0x0000096C,
HWVfDmaFec4GdlDescBaseLoRegVf = 0x00000980,
HWVfDmaFec4GdlDescBaseHiRegVf = 0x00000984,
HWVfDmaFec4GdlRespPtrLoRegVf = 0x00000988,
HWVfDmaFec4GdlRespPtrHiRegVf = 0x0000098C,
HWVfDmaFftDescBaseLoRegVf = 0x000009A0,
HWVfDmaFftDescBaseHiRegVf = 0x000009A4,
HWVfDmaFftRespPtrLoRegVf = 0x000009A8,
HWVfDmaFftRespPtrHiRegVf = 0x000009AC,
HWVfQmgrAqResetVf = 0x00000E00,
HWVfQmgrRingSizeVf = 0x00000E04,
HWVfQmgrGrpDepthLog20Vf = 0x00000E08,
HWVfQmgrGrpDepthLog21Vf = 0x00000E0C,
HWVfQmgrGrpFunction0Vf = 0x00000E10,
HWVfQmgrGrpFunction1Vf = 0x00000E14,
HWVfPmACntrlRegVf = 0x00000F40,
HWVfPmACountVf = 0x00000F48,
HWVfPmAKCntLoVf = 0x00000F50,
HWVfPmAKCntHiVf = 0x00000F54,
HWVfPmADeltaCntLoVf = 0x00000F60,
HWVfPmADeltaCntHiVf = 0x00000F64,
HWVfPmBCntrlRegVf = 0x00000F80,
HWVfPmBCountVf = 0x00000F88,
HWVfPmBKCntLoVf = 0x00000F90,
HWVfPmBKCntHiVf = 0x00000F94,
HWVfPmBDeltaCntLoVf = 0x00000FA0,
HWVfPmBDeltaCntHiVf = 0x00000FA4,
HWVfPmCCntrlRegVf = 0x00000FC0,
HWVfPmCCountVf = 0x00000FC8,
HWVfPmCKCntLoVf = 0x00000FD0,
HWVfPmCKCntHiVf = 0x00000FD4,
HWVfPmCDeltaCntLoVf = 0x00000FE0,
HWVfPmCDeltaCntHiVf = 0x00000FE4
};
/* TIP VF Interrupt numbers */
enum {
ACC200_VF_INT_QMGR_AQ_OVERFLOW = 0,
ACC200_VF_INT_DOORBELL_PF_2_VF = 1,
ACC200_VF_INT_ILLEGAL_FORMAT = 2,
ACC200_VF_INT_QMGR_DISABLED_ACCESS = 3,
ACC200_VF_INT_QMGR_AQ_OVERTHRESHOLD = 4,
ACC200_VF_INT_DMA_DL_DESC_IRQ = 5,
ACC200_VF_INT_DMA_UL_DESC_IRQ = 6,
ACC200_VF_INT_DMA_FFT_DESC_IRQ = 7,
ACC200_VF_INT_DMA_UL5G_DESC_IRQ = 8,
ACC200_VF_INT_DMA_DL5G_DESC_IRQ = 9,
ACC200_VF_INT_DMA_MLD_DESC_IRQ = 10,
};
#endif /* ACC200_VF_ENUM_H */

@@ -1,7 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2023 Red Hat, Inc.
*/
#include <rte_log.h>
RTE_LOG_REGISTER_SUFFIX(acc_common_logtype, common, INFO);

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,567 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _VIRTCHNL_INLINE_IPSEC_H_
#define _VIRTCHNL_INLINE_IPSEC_H_
#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SA_DESTROY 0
#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
#define VIRTCHNL_CIPHER 2
#define VIRTCHNL_AEAD 3
/* caps enabled */
#define VIRTCHNL_IPSEC_ESN_ENA BIT(0)
#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1)
#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2)
#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3)
#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4)
#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5)
#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6)
#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7)
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */
#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
#define VIRTCHNL_PROTO_AH 2
#define VIRTCHNL_PROTO_RSVD1 3
/* sa mode */
#define VIRTCHNL_SA_MODE_TRANSPORT 1
#define VIRTCHNL_SA_MODE_TUNNEL 2
#define VIRTCHNL_SA_MODE_TRAN_TUN 3
#define VIRTCHNL_SA_MODE_UNKNOWN 4
/* sa direction */
#define VIRTCHNL_DIR_INGRESS 1
#define VIRTCHNL_DIR_EGRESS 2
#define VIRTCHNL_DIR_INGRESS_EGRESS 3
/* sa termination */
#define VIRTCHNL_TERM_SOFTWARE 1
#define VIRTCHNL_TERM_HARDWARE 2
/* sa ip type */
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* for virtchnl_ipsec_resp */
enum inline_ipsec_resp {
INLINE_IPSEC_SUCCESS = 0,
INLINE_IPSEC_FAIL = -1,
INLINE_IPSEC_ERR_FIFO_FULL = -2,
INLINE_IPSEC_ERR_NOT_READY = -3,
INLINE_IPSEC_ERR_VF_DOWN = -4,
INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
INLINE_IPSEC_ERR_NO_MEM = -6,
};
/* Detailed opcodes for DPDK and IPsec use */
enum inline_ipsec_ops {
INLINE_IPSEC_OP_GET_CAP = 0,
INLINE_IPSEC_OP_GET_STATUS = 1,
INLINE_IPSEC_OP_SA_CREATE = 2,
INLINE_IPSEC_OP_SA_UPDATE = 3,
INLINE_IPSEC_OP_SA_DESTROY = 4,
INLINE_IPSEC_OP_SP_CREATE = 5,
INLINE_IPSEC_OP_SP_DESTROY = 6,
INLINE_IPSEC_OP_SA_READ = 7,
INLINE_IPSEC_OP_EVENT = 8,
INLINE_IPSEC_OP_RESP = 9,
};
#pragma pack(1)
/* Not all fields are valid; if a field is invalid, set all of its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
u16 block_size;
u16 min_key_size;
u16 max_key_size;
u16 inc_key_size;
u16 min_iv_size;
u16 max_iv_size;
u16 inc_iv_size;
u16 min_digest_size;
u16 max_digest_size;
u16 inc_digest_size;
u16 min_aad_size;
u16 max_aad_size;
u16 inc_aad_size;
};
#pragma pack()
/* vf record the capability of crypto from the virtchnl */
struct virtchnl_sym_crypto_cap {
u8 crypto_type;
u8 algo_cap_num;
struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
};
/* VIRTCHNL_OP_GET_IPSEC_CAP
* VF passes virtchnl_ipsec_cap to PF,
* and PF returns the IPsec capability set over virtchnl.
*/
#pragma pack(1)
struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
u8 virtchnl_protocol_type;
/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
u8 virtchnl_sa_mode;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 termination_mode;
/* number of supported crypto capability */
u8 crypto_cap_num;
/* descriptor ID */
u16 desc_id;
/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
u32 caps_enabled;
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
u32 algo_type;
/* Length of valid IV data. */
u16 iv_len;
/* Length of digest */
u16 digest_len;
/* SA salt */
u32 salt;
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
#pragma pack()
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_CREATE
* VF sends this SA configuration to PF using virtchnl;
* PF creates the SA per the configuration and the PF driver
* returns a unique index (sa_idx) for the created SA.
*/
struct virtchnl_ipsec_sa_cfg {
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* type of outer IP - IPv4/IPv6 */
u8 virtchnl_ip_type;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* udp encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* reserved */
u8 reserved1;
/* SA security parameter index */
u32 spi;
/* outer src ip address */
u8 src_addr[16];
/* outer dst ip address */
u8 dst_addr[16];
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
/* When enabled, sa_index must be valid */
u8 sa_index_en;
/* SA index when sa_index_en is true */
u32 sa_index;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When enabled, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
/* anti-replay window check - enable/disable
* When enabled, arw_size must be valid.
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* no ip offload mode - enable/disable
* When enabled, ip type and address must not be valid.
*/
u8 no_ip_offload_en;
/* SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
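As a minimal sketch (not code from the deleted tree), this is how a VF might fill the structure above for an ingress transport-mode ESP SA with a single AES-128-GCM transform. It assumes the u8/u16/u32/u64 typedefs from the base virtchnl header are in scope, and all values are illustrative:

#include <string.h>

static void example_fill_sa_cfg(struct virtchnl_ipsec_sa_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
	cfg->virtchnl_termination = VIRTCHNL_TERM_HARDWARE;
	cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
	cfg->virtchnl_direction = VIRTCHNL_DIR_INGRESS;
	cfg->spi = 0x1000;	/* example SPI */
	cfg->arw_check_en = 1;
	cfg->arw_size = 63;	/* offset-by-1 encoding: window size of 64 */
	/* single AEAD transform in the crypto configuration */
	cfg->crypto_cfg.items[0].crypto_type = VIRTCHNL_AEAD;
	cfg->crypto_cfg.items[0].algo_type = VIRTCHNL_AES_GCM;
	cfg->crypto_cfg.items[0].key_len = 16;	/* AES-128 key */
	cfg->crypto_cfg.items[0].iv_len = 8;
	cfg->crypto_cfg.items[0].digest_len = 16;
	/* key_data would be copied in here from the session parameters */
}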
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
* VF sends the index and new configuration of an SA to PF;
* PF updates the SA according to that configuration.
*/
struct virtchnl_ipsec_sa_update {
u32 sa_index; /* SA to update */
u32 esn_hi; /* high 32 bits of esn */
u32 esn_low; /* low 32 bits of esn */
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
* VF sends the indices of the SAs to destroy to PF;
* PF destroys the SAs accordingly.
* The flag bitmap indicates whether all SAs or only the
* selected SAs will be destroyed.
*/
struct virtchnl_ipsec_sa_destroy {
/* An all-zero bitmap indicates that all SAs will be destroyed.
* A non-zero bitmap indicates that the selected SAs in
* the sa_index array will be destroyed.
*/
u8 flag;
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
/* VIRTCHNL_OP_IPSEC_SA_READ
* VF sends this SA configuration to PF using virtchnl;
* PF reads the SA and returns its configuration.
*/
struct virtchnl_ipsec_sa_read {
/* SA valid - invalid/valid */
u8 valid;
/* SA active - inactive/active */
u8 active;
/* SA SN rollover - not_rollover/rollover */
u8 sn_rollover;
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When enabled, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
/* anti-replay window check - enable/disable
* When enabled, arw_size, arw_top, and arw must be valid
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* reserved */
u8 reserved1;
/* top of anti-replay-window */
u64 arw_top;
/* anti-replay-window */
u8 arw[16];
/* packets processed */
u64 packets_processed;
/* bytes processed */
u64 bytes_processed;
/* packets dropped */
u32 packets_dropped;
/* authentication failures */
u32 auth_fails;
/* ARW check failures */
u32 arw_fails;
/* type of esn - enable/disable */
u8 esn;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* SA security parameter index */
u32 spi;
/* SA salt */
u32 salt;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
/* SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* Add allowlist entry in IES */
struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
/* Drop frame if true or redirect to QAT if false. */
u8 drop;
/* Congestion domain. For future use. */
u8 cgd;
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
/* 0 for NAT-T unsupported, 1 for NAT-T supported */
u8 is_udp;
/* reserved */
u8 reserved;
/* NAT-T UDP port number. Only valid in case NAT-T supported */
u16 udp_port;
};
#pragma pack(1)
/* Delete allowlist entry in IES */
struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
u32 rule_id;
};
#pragma pack()
/* Response from IES to allowlist operations */
struct virtchnl_ipsec_sp_cfg_resp {
u32 rule_id;
};
struct virtchnl_ipsec_sa_cfg_resp {
u32 sa_handle;
};
#define INLINE_IPSEC_EVENT_RESET 0x1
#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
struct virtchnl_ipsec_event {
u32 ipsec_event_data;
};
#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
struct virtchnl_ipsec_status {
u32 status;
};
struct virtchnl_ipsec_resp {
u32 resp;
};
/* Internal message descriptor for VF <-> IPsec communication */
struct inline_ipsec_msg {
u16 ipsec_opcode;
u16 req_id;
union {
/* IPsec request */
struct virtchnl_ipsec_sa_cfg sa_cfg[0];
struct virtchnl_ipsec_sp_cfg sp_cfg[0];
struct virtchnl_ipsec_sa_update sa_update[0];
struct virtchnl_ipsec_sa_destroy sa_destroy[0];
struct virtchnl_ipsec_sp_destroy sp_destroy[0];
/* IPsec response */
struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
struct virtchnl_ipsec_cap ipsec_cap[0];
struct virtchnl_ipsec_status ipsec_status[0];
/* response to del_sa, del_sp, update_sa */
struct virtchnl_ipsec_resp ipsec_resp[0];
/* IPsec event (no req_id is required) */
struct virtchnl_ipsec_event event[0];
/* Reserved */
struct virtchnl_ipsec_sa_read sa_read[0];
} ipsec_data;
};
static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
{
u16 valid_len = sizeof(struct inline_ipsec_msg);
switch (opcode) {
case INLINE_IPSEC_OP_GET_CAP:
case INLINE_IPSEC_OP_GET_STATUS:
break;
case INLINE_IPSEC_OP_SA_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
break;
case INLINE_IPSEC_OP_SP_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
break;
case INLINE_IPSEC_OP_SA_UPDATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_update);
break;
case INLINE_IPSEC_OP_SA_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
break;
case INLINE_IPSEC_OP_SP_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
break;
/* Only for msg length calculation of response to VF in case of
* inline ipsec failure.
*/
case INLINE_IPSEC_OP_RESP:
valid_len += sizeof(struct virtchnl_ipsec_resp);
break;
default:
valid_len = 0;
break;
}
return valid_len;
}
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
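A short usage sketch for the message descriptor and the length helper above. The build_sa_destroy_msg wrapper is invented for illustration; it assumes the u16/u32 typedefs from the base virtchnl header and the GNU zero-length-array idiom that the union relies on:

#include <stdlib.h>

static struct inline_ipsec_msg *build_sa_destroy_msg(u32 sa_idx, u16 *msg_len)
{
	u16 len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_DESTROY);
	struct inline_ipsec_msg *msg = calloc(1, len);

	if (msg == NULL)
		return NULL;
	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
	msg->req_id = 0;	/* example request id */
	/* non-zero flag: destroy only the SAs listed in sa_index */
	msg->ipsec_data.sa_destroy[0].flag = 0x1;
	msg->ipsec_data.sa_destroy[0].sa_index[0] = sa_idx;
	*msg_len = len;
	return msg;
}

The same pattern applies to the other request opcodes; a received message whose length does not match the helper's return value for its opcode would be rejected.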

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R
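The Marvell files that follow all repeat one pattern: each tiny translation unit defines an R(name, flags) template and expands it against one slice of the NIX_RX_FASTPATH_MODES_* X-macro list, so every Rx-offload flag combination gets its own specialized dequeue function, and the expansion cost is spread across many small objects that can be compiled in parallel. A self-contained sketch of the underlying X-macro technique, with invented MODES/deq_* names, looks like this:

#include <stdio.h>

/* The list macro enumerates the (name, flags) pairs once... */
#define MODES \
	R(plain, 0x0) \
	R(csum,  0x1) \
	R(ptype, 0x2)

/* ...and each translation unit defines R to stamp out its variants. */
#define R(name, flags) \
	static int deq_##name(void) { return (flags); }
MODES
#undef R

int main(void)
{
	/* each call hits a function specialized for its flag set */
	printf("%d %d %d\n", deq_plain(), deq_csum(), deq_ptype());
	return 0;
}

Splitting the expansions of one list across deq_ca, deq_ca_seg, deq_tmo_ca, and their burst variants is why so many near-identical files appear below.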

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_112_127
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_16_31
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_32_47
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_48_63
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_64_79
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_80_95
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_deq_ca_##name, flags) \
SSO_DEQ_CA(cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_burst_##name, \
cn10k_sso_hws_deq_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_burst_##name, \
cn10k_sso_hws_reas_deq_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_DEQ_CA_SEG(cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_ca_seg_burst_##name, \
cn10k_sso_hws_deq_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_DEQ_TMO_CA(cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_DEQ_TMO_CA_SEG(cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, \
flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_deq_tmo_ca_seg_##name, flags) \
SSO_CMN_DEQ_BURST(cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name, \
cn10k_sso_hws_reas_deq_tmo_ca_seg_##name, flags | NIX_RX_REAS_F)
NIX_RX_FASTPATH_MODES_96_111
#undef R

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) SSO_DEQ_CA(cn9k_sso_hws_deq_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_deq_ca_burst_##name, \
cn9k_sso_hws_deq_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) SSO_DEQ_CA_SEG(cn9k_sso_hws_deq_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_deq_ca_seg_burst_##name, \
cn9k_sso_hws_deq_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) SSO_DEQ_TMO_CA(cn9k_sso_hws_deq_tmo_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_deq_tmo_ca_burst_##name, \
cn9k_sso_hws_deq_tmo_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,13 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DEQ_TMO_CA_SEG(cn9k_sso_hws_deq_tmo_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_deq_tmo_ca_seg_burst_##name, \
cn9k_sso_hws_deq_tmo_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) SSO_DUAL_DEQ_CA(cn9k_sso_hws_dual_deq_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_dual_deq_ca_burst_##name, \
cn9k_sso_hws_dual_deq_ca_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,13 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_DUAL_DEQ_CA_SEG(cn9k_sso_hws_dual_deq_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

View File

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#define R(name, flags) \
SSO_CMN_DEQ_BURST(cn9k_sso_hws_dual_deq_ca_seg_burst_##name, \
cn9k_sso_hws_dual_deq_ca_seg_##name, flags)
NIX_RX_FASTPATH_MODES_0_15
#undef R

Some files were not shown because too many files have changed in this diff.