f-stack: add library support for DPDK 22.11.3.

This commit is contained in:
jfb8856606 2023-09-13 12:23:25 +00:00
parent badf629eb9
commit 99b1fff09d
2 changed files with 44 additions and 49 deletions

View File

@@ -202,7 +202,7 @@ check_all_ports_link_status(void)
printf("Port %d Link Up - speed %u "
"Mbps - %s\n", (int)portid,
(unsigned)link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
} else {
printf("Port %d Link Down\n", (int)portid);
@@ -546,14 +546,14 @@ set_rss_table(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
return;
}
int reta_conf_size = RTE_MAX(1, reta_size / RTE_RETA_GROUP_SIZE);
int reta_conf_size = RTE_MAX(1, reta_size / RTE_ETH_RETA_GROUP_SIZE);
struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
/* config HW indirection table */
unsigned i, j, hash=0;
for (i = 0; i < reta_conf_size; i++) {
reta_conf[i].mask = ~0ULL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
reta_conf[i].reta[j] = hash++ % nb_queues;
}
}
@@ -627,8 +627,8 @@ init_port_start(void)
addr.addr_bytes, RTE_ETHER_ADDR_LEN);
/* Set RSS mode */
uint64_t default_rss_hf = ETH_RSS_PROTO_MASK;
port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
uint64_t default_rss_hf = RTE_ETH_RSS_PROTO_MASK;
port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_hf = default_rss_hf;
if (dev_info.hash_key_size == 52) {
rsskey = default_rsskey_52bytes;
@@ -642,27 +642,27 @@ init_port_start(void)
port_conf.rx_adv_conf.rss_conf.rss_key_len = rsskey_len;
port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
ETH_RSS_PROTO_MASK) {
RTE_ETH_RSS_PROTO_MASK) {
printf("Port %u modified RSS hash function based on hardware support,"
"requested:%#"PRIx64" configured:%#"PRIx64"\n",
port_id, default_rss_hf,
port_conf.rx_adv_conf.rss_conf.rss_hf);
}
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}
/* Set Rx VLAN stripping */
if (ff_global_cfg.dpdk.vlan_strip) {
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
/* Enable HW CRC stripping */
port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
/* FIXME: Enable TCP LRO ?*/
#if 0
@@ -674,25 +674,25 @@ init_port_start(void)
#endif
/* Set Rx checksum checking */
if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) &&
(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)) {
if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) &&
(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) {
printf("RX checksum offload supported\n");
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
pconf->hw_features.rx_csum = 1;
}
if (ff_global_cfg.dpdk.tx_csum_offoad_skip == 0) {
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
printf("TX ip checksum offload supported\n");
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
pconf->hw_features.tx_csum_ip = 1;
}
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)) {
if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
printf("TX TCP&UDP checksum offload supported\n");
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
pconf->hw_features.tx_csum_l4 = 1;
}
} else {
@@ -700,9 +700,9 @@ init_port_start(void)
}
if (ff_global_cfg.dpdk.tso) {
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
printf("TSO is supported\n");
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
pconf->hw_features.tx_tso = 1;
}
else {
@@ -1060,12 +1060,12 @@ init_flow(uint16_t port_id, uint16_t tcp_port) {
* using case of FDIR is (but not limited to), using multiple processes to
* listen on different ports.
*
* This function can be called either in FSTACK or in end-application.
* This function can be called either in FSTACK or in end-application.
*
* Example:
* Given 2 fstack instances A and B. Instance A listens on port 80, and
* instance B listens on port 81. We want to process the traffic to port 80
* on rx queue 0, and the traffic to port 81 on rx queue 1.
* on rx queue 0, and the traffic to port 81 on rx queue 1.
* // port 80 rx queue 0
* ret = fdir_add_tcp_flow(port_id, 0, FF_FLOW_INGRESS, 0, 80);
* // port 81 rx queue 1
@@ -1074,27 +1074,27 @@ init_flow(uint16_t port_id, uint16_t tcp_port) {
#define FF_FLOW_EGRESS 1
#define FF_FLOW_INGRESS 2
/**
* Create a flow rule that moves packets with matching src and dest tcp port
* to the target queue.
*
* Create a flow rule that moves packets with matching src and dest tcp port
* to the target queue.
*
* This function uses general flow rules and doesn't rely on the flow_isolation
* that not all the FDIR capable NIC support.
*
* @param port_id
* The selected port.
* @param queue
* @param queue
* The target queue.
* @param dir
* The direction of the traffic.
* 1 for egress, 2 for ingress and sum(1+2) for both.
* @param tcp_sport
* @param dir
* The direction of the traffic.
* 1 for egress, 2 for ingress and sum(1+2) for both.
* @param tcp_sport
* The src tcp port to match.
* @param tcp_dport
* The dest tcp port to match.
*
*/
static int
fdir_add_tcp_flow(uint16_t port_id, uint16_t queue, uint16_t dir,
fdir_add_tcp_flow(uint16_t port_id, uint16_t queue, uint16_t dir,
uint16_t tcp_sport, uint16_t tcp_dport)
{
struct rte_flow_attr attr;
@@ -1115,7 +1115,7 @@ fdir_add_tcp_flow(uint16_t port_id, uint16_t queue, uint16_t dir,
*/
memset(&attr, 0, sizeof(struct rte_flow_attr));
attr.ingress = ((dir & FF_FLOW_INGRESS) > 0);
attr.egress = ((dir & FF_FLOW_EGRESS) > 0);
attr.egress = ((dir & FF_FLOW_EGRESS) > 0);
/*
* create the action sequence.
@@ -1134,7 +1134,7 @@ fdir_add_tcp_flow(uint16_t port_id, uint16_t queue, uint16_t dir,
memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp));
memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp));
tcp_spec.hdr.src_port = htons(tcp_sport);
tcp_mask.hdr.src_port = (tcp_sport == 0 ? 0: 0xffff);
tcp_mask.hdr.src_port = (tcp_sport == 0 ? 0: 0xffff);
tcp_spec.hdr.dst_port = htons(tcp_dport);
tcp_mask.hdr.dst_port = (tcp_dport == 0 ? 0: 0xffff);
flow_pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
@@ -1148,7 +1148,7 @@ fdir_add_tcp_flow(uint16_t port_id, uint16_t queue, uint16_t dir,
return (1);
flow = rte_flow_create(port_id, &attr, flow_pattern, flow_action, &rfe);
if (!flow)
if (!flow)
return port_flow_complain(&rfe);
return (0);

View File

@@ -160,7 +160,7 @@ kni_config_network_interface(uint16_t port_id, uint8_t if_up)
}
if (ret < 0)
printf("Failed to Configure network interface of %d %s\n",
printf("Failed to Configure network interface of %d %s\n",
port_id, if_up ? "up" : "down");
return ret;
@@ -477,7 +477,7 @@ ff_kni_alloc(uint16_t port_id, unsigned socket_id,
struct rte_kni_ops ops;
struct rte_eth_dev_info dev_info;
const struct rte_pci_device *pci_dev;
const struct rte_bus *bus = NULL;
int ret;
kni_stat[port_id] = (struct kni_interface_stats*)rte_zmalloc(
"kni:stat_lcore",
@@ -489,7 +489,7 @@ ff_kni_alloc(uint16_t port_id, unsigned socket_id,
/* only support one kni */
memset(&conf, 0, sizeof(conf));
snprintf(conf.name, RTE_KNI_NAMESIZE, "veth%u", port_id);
snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
conf.core_id = rte_lcore_id();
conf.force_bind = 1;
conf.group_id = port_id;
@@ -498,16 +498,11 @@ ff_kni_alloc(uint16_t port_id, unsigned socket_id,
conf.mbuf_size = mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE;
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
if (dev_info.device)
bus = rte_bus_find_by_device(dev_info.device);
if (bus && !strcmp(bus->name, "pci")) {
pci_dev = RTE_DEV_TO_PCI(dev_info.device);
conf.addr = pci_dev->addr;
conf.id = pci_dev->id;
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0) {
rte_panic("kni get dev info %u failed!\n", port_id);
}
/* Get the interface default mac address */
rte_eth_macaddr_get(port_id,
(struct rte_ether_addr *)&conf.mac_addr);
@@ -534,7 +529,7 @@ ff_kni_alloc(uint16_t port_id, unsigned socket_id,
snprintf((char*)ring_name, RTE_KNI_NAMESIZE, "kni_ring_%u", port_id);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
kni_rp[port_id] = rte_ring_create(ring_name, ring_queue_size,
kni_rp[port_id] = rte_ring_create(ring_name, ring_queue_size,
socket_id, RING_F_SC_DEQ);
if (rte_ring_lookup(ring_name) != kni_rp[port_id])