2017-04-21 10:43:26 +00:00
|
|
|
|
/*
|
2021-09-18 08:05:45 +00:00
|
|
|
|
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company.
|
2017-04-21 10:43:26 +00:00
|
|
|
|
* All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
|
* modification, are permitted provided that the following conditions are met:
|
|
|
|
|
*
|
|
|
|
|
* 1. Redistributions of source code must retain the above copyright notice, this
|
|
|
|
|
* list of conditions and the following disclaimer.
|
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
|
* this list of conditions and the following disclaimer in the documentation
|
|
|
|
|
* and/or other materials provided with the distribution.
|
|
|
|
|
*
|
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
|
|
|
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
|
|
|
|
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
|
|
|
|
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
|
|
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
|
|
|
|
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
|
|
|
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
#include <arpa/inet.h>
|
2019-07-17 09:31:47 +00:00
|
|
|
|
#include <netinet/icmp6.h>
|
2017-04-21 10:43:26 +00:00
|
|
|
|
|
|
|
|
|
#include <rte_config.h>
|
|
|
|
|
#include <rte_ether.h>
|
2018-05-15 09:49:22 +00:00
|
|
|
|
#include <rte_bus_pci.h>
|
2017-04-21 10:43:26 +00:00
|
|
|
|
#include <rte_ethdev.h>
|
2023-09-19 10:28:57 +00:00
|
|
|
|
#ifdef FF_KNI_KNI
|
2017-04-21 10:43:26 +00:00
|
|
|
|
#include <rte_kni.h>
|
2023-09-19 10:28:57 +00:00
|
|
|
|
#endif
|
2017-04-21 10:43:26 +00:00
|
|
|
|
#include <rte_malloc.h>
|
|
|
|
|
#include <rte_ring.h>
|
|
|
|
|
#include <rte_ip.h>
|
|
|
|
|
#include <rte_tcp.h>
|
|
|
|
|
#include <rte_udp.h>
|
|
|
|
|
|
|
|
|
|
#include "ff_dpdk_kni.h"
|
|
|
|
|
#include "ff_config.h"
|
|
|
|
|
|
2024-10-10 09:48:40 +00:00
|
|
|
|
#ifndef IPPROTO_OSPFIGP
|
|
|
|
|
#define IPPROTO_OSPFIGP 89 /**< OSPFIGP */
|
|
|
|
|
#endif
|
|
|
|
|
|
2017-04-21 10:43:26 +00:00
|
|
|
|
/* Callback for request of changing MTU */
|
|
|
|
|
/* Total octets in ethernet header */
|
|
|
|
|
#define KNI_ENET_HEADER_SIZE 14
|
|
|
|
|
|
|
|
|
|
/* Total octets in the FCS */
|
|
|
|
|
#define KNI_ENET_FCS_SIZE 4
|
|
|
|
|
|
2023-09-19 10:28:57 +00:00
|
|
|
|
#ifndef RTE_KNI_NAMESIZE
|
|
|
|
|
#define RTE_KNI_NAMESIZE 16
|
|
|
|
|
#endif
|
|
|
|
|
|
2017-04-21 10:43:26 +00:00
|
|
|
|
/*
 * Bit helpers for the KNI port bitmaps below.
 * Bits are addressed MSB-first within each byte: bit index 0 selects
 * mask 0x80 and index 7 selects 0x01 (see magic_bits).  `n` is a byte
 * value, `m` is the bit index 0..7.
 */
#define set_bit(n, m) (n | magic_bits[m])
#define clear_bit(n, m) (n & (~magic_bits[m]))
#define get_bit(n, m) (n & magic_bits[m])

/* Single-bit masks for positions 0..7, most-significant bit first. */
static const int magic_bits[8] = {
    0x80, 0x40, 0x20, 0x10,
    0x8, 0x4, 0x2, 0x1
};

/*
 * One bit per 16-bit port number (indexed in network byte order, see
 * set_bitmap/get_bitmap): a set bit means traffic to that port is
 * diverted to the kernel.  Allocated and populated in ff_kni_init().
 */
static unsigned char *udp_port_bitmap = NULL;
static unsigned char *tcp_port_bitmap = NULL;
|
|
|
|
|
|
|
|
|
|
/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
#ifdef FF_KNI_KNI
    /* rte_kni device handle; only used when kni.type == KNI_TYPE_KNI. */
    struct rte_kni *kni;
#endif

    /* port id of dev or virtio_user */
    uint16_t port_id;

    /* number of pkts received from NIC, and sent to KNI */
    uint64_t rx_packets;

    /* number of pkts received from NIC, but failed to send to KNI */
    uint64_t rx_dropped;

    /* number of pkts received from KNI, and sent to NIC */
    uint64_t tx_packets;

    /* number of pkts received from KNI, but failed to send to NIC */
    uint64_t tx_dropped;
};
|
|
|
|
|
|
|
|
|
|
/*
 * Per-port rings holding packets bound for the kernel: filled by
 * ff_kni_enqueue(), drained by kni_process_tx().  Indexed by port id.
 */
struct rte_ring **kni_rp;

/* Per-port KNI/virtio-user state and packet counters, indexed by port id. */
struct kni_interface_stats **kni_stat;

/* Running counters compared against the configured rate limits
 * (kernel/console/general) when deciding whether to drop a packet. */
struct kni_ratelimit kni_rate_limt = {0, 0, 0};
|
|
|
|
|
|
2017-04-21 10:43:26 +00:00
|
|
|
|
/*
 * Mark `port` (given in host byte order) in `bitmap`.
 *
 * The port is converted to network byte order before indexing so that
 * later lookups can pass big-endian header fields (e.g. tcp dst_port)
 * to get_bitmap() without any conversion.
 */
static void
set_bitmap(uint16_t port, unsigned char *bitmap)
{
    uint16_t be_port = htons(port);
    unsigned char *byte = bitmap + be_port / 8;

    *byte = set_bit(*byte, be_port % 8);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Test whether `port` is marked in `bitmap`.
 *
 * Expects `port` exactly as stored by set_bitmap(), i.e. in network
 * byte order; callers pass L4 header fields directly.
 * Returns 1 when the bit is set, 0 otherwise.
 */
static int
get_bitmap(uint16_t port, unsigned char *bitmap)
{
    const unsigned char byte = bitmap[port / 8];

    return (get_bit(byte, port % 8) != 0) ? 1 : 0;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Parse a comma-separated port list such as "80,443,8000-8100" and mark
 * every listed port in `port_bitmap`.  A dash denotes an inclusive
 * range; a dash that does not belong to the current segment (i.e. one
 * appearing at or after the next comma) is ignored for that segment.
 * A NULL list is a no-op.
 */
static void
kni_set_bitmap(const char *p, unsigned char *port_bitmap)
{
    const char *seg, *comma, *dash;
    int port;

    if (p == NULL)
        return;

    seg = p;
    for (;;) {
        comma = strstr(seg, ",");
        dash = strstr(seg, "-");

        if (dash != NULL && (comma == NULL || dash < comma - 1)) {
            /* "lo-hi" range: mark every port in [lo, hi]. */
            int last = atoi(dash + 1);
            for (port = atoi(seg); port <= last; ++port)
                set_bitmap(port, port_bitmap);
        } else {
            /* Single port. */
            set_bitmap(atoi(seg), port_bitmap);
        }

        if (comma == NULL)
            break;
        seg = comma + 1;
    }
}
|
|
|
|
|
|
2023-09-19 10:28:57 +00:00
|
|
|
|
#ifdef FF_KNI_KNI
|
2017-04-21 10:43:26 +00:00
|
|
|
|
/*
 * KNI callback invoked when the kernel requests an MTU change on the
 * veth interface.  MTU changes are not supported: the request is
 * acknowledged (return 0) without touching the device.
 */
static int
kni_change_mtu(uint16_t port_id, unsigned new_mtu)
{
    /* Both arguments are intentionally ignored; the casts silence
     * -Wunused-parameter without changing behavior. */
    (void)port_id;
    (void)new_mtu;

    return 0;
}
|
|
|
|
|
|
|
|
|
|
/*
 * KNI callback invoked when the kernel brings the veth interface
 * up or down (e.g. `ifconfig vethN up`).  Mirrors the request onto
 * the underlying DPDK port via set_link_up/down, falling back to a
 * stop/start cycle for drivers that do not implement link toggling.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int
kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
    int ret = 0;

    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Invalid port id %d\n", port_id);
        return -EINVAL;
    }

    printf("Configure network interface of %d %s\n",
        port_id, if_up ? "up" : "down");

    ret = (if_up) ?
        rte_eth_dev_set_link_up(port_id) :
        rte_eth_dev_set_link_down(port_id);

    /*
     * Some NIC drivers will crash in secondary process after config kni , Such as ENA with DPDK-21.22.3.
     * If you meet this crash, you can try disable the code below and return 0 directly.
     * Or run primary first, then config kni interface in kernel, and run secondary processes last.
     */
    if(-ENOTSUP == ret) {
        /* Driver has no set_link support: emulate via stop/start. */
        if (if_up != 0) {
            /* Configure network interface up */
            rte_eth_dev_stop(port_id);
            ret = rte_eth_dev_start(port_id);
        } else {
            /* Configure network interface down */
            rte_eth_dev_stop(port_id);
            ret = 0;
        }
    }

    if (ret < 0)
        printf("Failed to Configure network interface of %d %s\n",
            port_id, if_up ? "up" : "down");

    return ret;
}
|
|
|
|
|
|
2019-03-14 09:17:58 +00:00
|
|
|
|
static void
|
2020-06-18 16:55:50 +00:00
|
|
|
|
print_ethaddr(const char *name, struct rte_ether_addr *mac_addr)
|
2019-03-14 09:17:58 +00:00
|
|
|
|
{
|
2020-06-18 16:55:50 +00:00
|
|
|
|
char buf[RTE_ETHER_ADDR_FMT_SIZE];
|
|
|
|
|
rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
|
2019-03-14 09:17:58 +00:00
|
|
|
|
printf("\t%s%s\n", name, buf);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Callback for request of configuring mac address.
 *
 * KNI callback: the kernel asked to change the MAC of the veth
 * interface; apply it as the default MAC of the backing DPDK port.
 * Returns 0 on success, negative on failure.
 */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
    int rc;

    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Invalid port id %d\n", port_id);
        return -EINVAL;
    }

    print_ethaddr("Address:", (struct rte_ether_addr *)mac_addr);

    rc = rte_eth_dev_default_mac_addr_set(port_id,
        (struct rte_ether_addr *)mac_addr);
    if (rc < 0)
        printf("Failed to config mac_addr for port %d\n", port_id);

    return rc;
}
|
2023-09-19 10:28:57 +00:00
|
|
|
|
#endif
|
2019-03-14 09:17:58 +00:00
|
|
|
|
|
2017-04-21 10:43:26 +00:00
|
|
|
|
/*
 * Drain packets queued for the kernel (by ff_kni_enqueue) from this
 * port's ring and transmit them to the exception path: either the
 * rte_kni device or the paired virtio_user port, depending on
 * ff_global_cfg.kni.type.  Applies the optional kernel-packets rate
 * limit.  Updates rx_packets/rx_dropped counters (packets flowing
 * NIC -> kernel are accounted as "rx" here).
 *
 * Note: queue_id is currently unused on this path; the exception
 * device is always addressed on queue 0.  Always returns 0.
 */
static int
kni_process_tx(uint16_t port_id, uint16_t queue_id,
    struct rte_mbuf **pkts_burst, unsigned count)
{
    /* read packet from kni ring(phy port) and transmit to kni */
    uint16_t nb_tx, nb_to_tx, nb_kni_tx = 0;
    nb_tx = rte_ring_dequeue_burst(kni_rp[port_id], (void **)pkts_burst, count, NULL);

    /*
     * The total ratelimit forwarded to the kernel, may a few more packets being sent, but it doesn’t matter,
     * If there are too many processes, there is also the possibility that the control packet will be ratelimited.
     */
    if (ff_global_cfg.kni.kernel_packets_ratelimit) {
        if (likely(kni_rate_limt.kernel_packets < ff_global_cfg.kni.kernel_packets_ratelimit)) {
            nb_to_tx = nb_tx;
        } else {
            /* Over budget: forward nothing; dequeued packets are freed below. */
            nb_to_tx = 0;
        }
        /* Counter is incremented even for dropped bursts; presumably it is
         * reset periodically elsewhere — confirm against the caller. */
        kni_rate_limt.kernel_packets += nb_tx;
    } else {
        nb_to_tx = nb_tx;
    }

#ifdef FF_KNI_KNI
    if (ff_global_cfg.kni.type == KNI_TYPE_KNI) {
        /* NB.
         * if nb_tx is 0,it must call rte_kni_tx_burst
         * must Call regularly rte_kni_tx_burst(kni, NULL, 0).
         * detail https://embedded.communities.intel.com/thread/6668
         */
        nb_kni_tx = rte_kni_tx_burst(kni_stat[port_id]->kni, pkts_burst, nb_to_tx);
        /* Service pending kernel requests (MTU change, link up/down). */
        rte_kni_handle_request(kni_stat[port_id]->kni);
    } else if (ff_global_cfg.kni.type == KNI_TYPE_VIRTIO)
#endif
    {
        /* virtio_user exception path: send to the paired port, queue 0. */
        nb_kni_tx = rte_eth_tx_burst(kni_stat[port_id]->port_id, 0, pkts_burst, nb_to_tx);
    }

    /* Free everything dequeued but not transmitted (includes packets
     * suppressed by the rate limit) and account them as drops. */
    if(nb_kni_tx < nb_tx) {
        uint16_t i;
        for(i = nb_kni_tx; i < nb_tx; ++i)
            rte_pktmbuf_free(pkts_burst[i]);

        kni_stat[port_id]->rx_dropped += (nb_tx - nb_kni_tx);
    }

    kni_stat[port_id]->rx_packets += nb_kni_tx;
    return 0;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Pull packets the kernel emitted on the exception path (rte_kni or
 * virtio_user, per ff_global_cfg.kni.type) and transmit them out of
 * the physical port on (port_id, queue_id).  Packets the NIC cannot
 * accept are freed and counted in tx_dropped; successes are counted
 * in tx_packets.  Always returns 0.
 */
static int
kni_process_rx(uint16_t port_id, uint16_t queue_id,
    struct rte_mbuf **pkts_burst, unsigned count)
{
    uint16_t nb_kni_rx = 0, nb_rx;

#ifdef FF_KNI_KNI
    if (ff_global_cfg.kni.type == KNI_TYPE_KNI) {
        /* read packet from kni, and transmit to phy port */
        nb_kni_rx = rte_kni_rx_burst(kni_stat[port_id]->kni, pkts_burst, count);
    } else if (ff_global_cfg.kni.type == KNI_TYPE_VIRTIO)
#endif
    {
        /* virtio_user exception path: receive from the paired port, queue 0. */
        nb_kni_rx = rte_eth_rx_burst(kni_stat[port_id]->port_id, 0, pkts_burst, count);
    }

    if (nb_kni_rx > 0) {
        nb_rx = rte_eth_tx_burst(port_id, queue_id, pkts_burst, nb_kni_rx);
        if (nb_rx < nb_kni_rx) {
            /* NIC TX queue full: free the unsent tail and count drops. */
            uint16_t i;
            for(i = nb_rx; i < nb_kni_rx; ++i)
                rte_pktmbuf_free(pkts_burst[i]);

            kni_stat[port_id]->tx_dropped += (nb_kni_rx - nb_rx);
        }

        kni_stat[port_id]->tx_packets += nb_rx;
    }
    return 0;
}
|
|
|
|
|
|
|
|
|
|
static enum FilterReturn
|
|
|
|
|
protocol_filter_l4(uint16_t port, unsigned char *bitmap)
|
|
|
|
|
{
|
|
|
|
|
if(get_bitmap(port, bitmap)) {
|
2017-05-02 10:05:26 +00:00
|
|
|
|
return FILTER_KNI;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return FILTER_UNKNOWN;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static enum FilterReturn
|
|
|
|
|
protocol_filter_tcp(const void *data, uint16_t len)
|
|
|
|
|
{
|
2020-06-18 16:55:50 +00:00
|
|
|
|
if (len < sizeof(struct rte_tcp_hdr))
|
2017-04-21 10:43:26 +00:00
|
|
|
|
return FILTER_UNKNOWN;
|
|
|
|
|
|
2020-06-18 16:55:50 +00:00
|
|
|
|
const struct rte_tcp_hdr *hdr;
|
|
|
|
|
hdr = (const struct rte_tcp_hdr *)data;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
|
|
|
|
|
return protocol_filter_l4(hdr->dst_port, tcp_port_bitmap);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static enum FilterReturn
|
|
|
|
|
protocol_filter_udp(const void* data,uint16_t len)
|
|
|
|
|
{
|
2020-06-18 16:55:50 +00:00
|
|
|
|
if (len < sizeof(struct rte_udp_hdr))
|
2017-04-21 10:43:26 +00:00
|
|
|
|
return FILTER_UNKNOWN;
|
|
|
|
|
|
2020-06-18 16:55:50 +00:00
|
|
|
|
const struct rte_udp_hdr *hdr;
|
|
|
|
|
hdr = (const struct rte_udp_hdr *)data;
|
2017-04-21 10:43:26 +00:00
|
|
|
|
|
|
|
|
|
return protocol_filter_l4(hdr->dst_port, udp_port_bitmap);
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-12 12:56:01 +00:00
|
|
|
|
#ifdef INET6
|
|
|
|
|
/*
|
|
|
|
|
* https://www.iana.org/assignments/ipv6-parameters/ipv6-parameters.xhtml
|
|
|
|
|
*/
|
|
|
|
|
#ifndef IPPROTO_HIP
|
|
|
|
|
#define IPPROTO_HIP 139
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
#ifndef IPPROTO_SHIM6
|
|
|
|
|
#define IPPROTO_SHIM6 140
|
|
|
|
|
#endif
|
2019-07-25 03:27:30 +00:00
|
|
|
|
|
|
|
|
|
#ifndef IPPROTO_MH
|
|
|
|
|
#define IPPROTO_MH 135
|
|
|
|
|
#endif
|
2019-07-12 12:56:01 +00:00
|
|
|
|
/*
 * Walk the chain of IPv6 extension headers starting at `data` and
 * return the total number of bytes they occupy (0 when *proto is not
 * an extension header).  On return, *proto is updated to the first
 * non-extension next-header value, so the caller can dispatch on the
 * real upper-layer protocol.  The walk recurses header by header and
 * is clamped to `len` bytes.
 */
static int
get_ipv6_hdr_len(uint8_t *proto, void *data, uint16_t len)
{
    int ext_hdr_len = 0;

    switch (*proto) {
        case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: case IPPROTO_DSTOPTS:
        case IPPROTO_MH: case IPPROTO_HIP: case IPPROTO_SHIM6:
            /* Hdr Ext Len field is at offset 1.
             * NOTE(review): RFC 8200 defines this field in 8-octet units
             * ((value + 1) * 8); this computes value + 1 bytes — confirm
             * whether this is intentional or an upstream bug. */
            ext_hdr_len = *((uint8_t *)data + 1) + 1;
            break;
        case IPPROTO_FRAGMENT:
            /* Fragment header is always 8 bytes. */
            ext_hdr_len = 8;
            break;
        case IPPROTO_AH:
            /* AH length field is in 4-octet units, not counting the
             * first two. */
            ext_hdr_len = (*((uint8_t *)data + 1) + 2) * 4;
            break;
        case IPPROTO_NONE:
#ifdef FF_IPSEC
        case IPPROTO_ESP:
            //proto = *((uint8_t *)data + len - 1 - 4);
            //ext_hdr_len = len;
#endif
        default:
            /* Not an extension header: chain ends here. */
            return ext_hdr_len;
    }

    /* Truncated header: consume whatever is left and stop. */
    if (ext_hdr_len >= len) {
        return len;
    }

    /* First byte of every extension header is its Next Header field. */
    *proto = *((uint8_t *)data);
    ext_hdr_len += get_ipv6_hdr_len(proto, data + ext_hdr_len, len - ext_hdr_len);

    return ext_hdr_len;
}
|
2019-07-17 09:31:47 +00:00
|
|
|
|
|
|
|
|
|
static enum FilterReturn
|
|
|
|
|
protocol_filter_icmp6(void *data, uint16_t len)
|
|
|
|
|
{
|
|
|
|
|
if (len < sizeof(struct icmp6_hdr))
|
|
|
|
|
return FILTER_UNKNOWN;
|
|
|
|
|
|
|
|
|
|
const struct icmp6_hdr *hdr;
|
|
|
|
|
hdr = (const struct icmp6_hdr *)data;
|
|
|
|
|
|
|
|
|
|
if (hdr->icmp6_type >= ND_ROUTER_SOLICIT && hdr->icmp6_type <= ND_REDIRECT)
|
|
|
|
|
return FILTER_NDP;
|
|
|
|
|
|
|
|
|
|
return FILTER_UNKNOWN;
|
|
|
|
|
}
|
2019-07-12 12:56:01 +00:00
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/*
 * Classify an IP packet (`eth_frame_type` selects IPv4/IPv6 parsing)
 * and decide whether it should be diverted to the kernel:
 *   - OSPF            -> FILTER_OSPF (own rate-limit bucket)
 *   - TCP/UDP         -> port-bitmap lookup (only when KNI is enabled)
 *   - ICMPv6 ND       -> FILTER_NDP
 *   - IPIP / IPv6-in-IP -> recurse into the inner packet
 *   - anything else / malformed -> FILTER_UNKNOWN (handled by f-stack)
 */
static enum FilterReturn
protocol_filter_ip(const void *data, uint16_t len, uint16_t eth_frame_type)
{
    uint8_t proto;
    int hdr_len;
    void *next;
    uint16_t next_len;

    if (eth_frame_type == RTE_ETHER_TYPE_IPV4) {
        if(len < sizeof(struct rte_ipv4_hdr))
            return FILTER_UNKNOWN;

        const struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)data;
        /* IHL is in 4-byte words. */
        hdr_len = (hdr->version_ihl & 0x0f) << 2;
        if (len < hdr_len)
            return FILTER_UNKNOWN;

        proto = hdr->next_proto_id;
#ifdef INET6
    } else if(eth_frame_type == RTE_ETHER_TYPE_IPV6) {
        if(len < sizeof(struct rte_ipv6_hdr))
            return FILTER_UNKNOWN;

        hdr_len = sizeof(struct rte_ipv6_hdr);
        proto = ((struct rte_ipv6_hdr *)data)->proto;
        /* Skip extension headers; `proto` becomes the upper-layer protocol. */
        hdr_len += get_ipv6_hdr_len(&proto, (void *)data + hdr_len, len - hdr_len);

        if (len < hdr_len)
            return FILTER_UNKNOWN;
#endif
    } else {
        return FILTER_UNKNOWN;
    }

    /* Start of the L4 (or inner IP) payload. */
    next = (void *)data + hdr_len;
    next_len = len - hdr_len;

    switch (proto) {
#ifdef FF_KNI
    /* The opsf protocol is forwarded to kni and the ratelimited separately */
    case IPPROTO_OSPFIGP:
        return FILTER_OSPF;
#endif

    case IPPROTO_TCP:
        /* Without KNI (or with it disabled) the `if` guard makes the
         * break unconditional, so TCP falls through to FILTER_UNKNOWN. */
#ifdef FF_KNI
        if (!enable_kni)
#endif
            break;

        return protocol_filter_tcp(next, next_len);

    case IPPROTO_UDP:
#ifdef FF_KNI
        if (!enable_kni)
#endif
            break;

        return protocol_filter_udp(next, next_len);

    case IPPROTO_IPIP:
        /* Tunneled IPv4: classify the inner packet. */
        return protocol_filter_ip(next, next_len, RTE_ETHER_TYPE_IPV4);
#ifdef INET6
    case IPPROTO_IPV6:
        /* Tunneled IPv6: classify the inner packet. */
        return protocol_filter_ip(next, next_len, RTE_ETHER_TYPE_IPV6);
    case IPPROTO_ICMPV6:
        return protocol_filter_icmp6(next, next_len);
#endif
    }

    return FILTER_UNKNOWN;
}
|
|
|
|
|
|
|
|
|
|
enum FilterReturn
|
2019-07-12 12:56:01 +00:00
|
|
|
|
ff_kni_proto_filter(const void *data, uint16_t len, uint16_t eth_frame_type)
|
2017-04-21 10:43:26 +00:00
|
|
|
|
{
|
2019-07-12 12:56:01 +00:00
|
|
|
|
return protocol_filter_ip(data, len, eth_frame_type);
|
2017-04-21 10:43:26 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * One-time KNI subsystem setup, called by every f-stack process.
 * The primary process allocates the shared kni_stat array and (for
 * KNI_TYPE_KNI) initializes the rte_kni subsystem; every process then
 * allocates its view of the per-port ring array and the TCP/UDP port
 * bitmaps (8192 bytes = 65536 bits, one per port number), filling the
 * bitmaps from the configured port lists.  Exits the process on any
 * allocation failure.
 */
void
ff_kni_init(uint16_t nb_ports, int type, const char *tcp_ports, const char *udp_ports)
{
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        kni_stat = rte_zmalloc("kni:stat",
            sizeof(struct kni_interface_stats *) * nb_ports,
            RTE_CACHE_LINE_SIZE);
        if (kni_stat == NULL)
            rte_exit(EXIT_FAILURE, "rte_zmalloc(1 (struct netio_kni_stat *)) "
                "failed\n");

        if (type == KNI_TYPE_KNI) {
#ifdef FF_KNI_KNI
            rte_kni_init(nb_ports);
#endif
        }
    }

    uint16_t lcoreid = rte_lcore_id();
    char name_buf[RTE_RING_NAMESIZE];
    snprintf(name_buf, RTE_RING_NAMESIZE, "kni::ring_%d", lcoreid);
    kni_rp = rte_zmalloc(name_buf,
        sizeof(struct rte_ring *) * nb_ports,
        RTE_CACHE_LINE_SIZE);
    if (kni_rp == NULL) {
        rte_exit(EXIT_FAILURE, "rte_zmalloc(%s (struct rte_ring*)) "
            "failed\n", name_buf);
    }

    /* NOTE(review): name_buf is formatted per-lcore but the zmalloc tags
     * below use fixed literals; name_buf is only used in error messages. */
    snprintf(name_buf, RTE_RING_NAMESIZE, "kni:tcp_port_bitmap_%d", lcoreid);
    tcp_port_bitmap = rte_zmalloc("kni:tcp_port_bitmap", 8192,
        RTE_CACHE_LINE_SIZE);
    if (tcp_port_bitmap == NULL) {
        rte_exit(EXIT_FAILURE, "rte_zmalloc(%s (tcp_port_bitmap)) "
            "failed\n", name_buf);
    }

    snprintf(name_buf, RTE_RING_NAMESIZE, "kni:udp_port_bitmap_%d", lcoreid);
    udp_port_bitmap = rte_zmalloc("kni:udp_port_bitmap", 8192,
        RTE_CACHE_LINE_SIZE);
    if (udp_port_bitmap == NULL) {
        rte_exit(EXIT_FAILURE, "rte_zmalloc(%s (udp_port_bitmap)) "
            "failed\n",name_buf);
    }

    memset(tcp_port_bitmap, 0, 8192);
    memset(udp_port_bitmap, 0, 8192);

    /* Mark every configured port that should be diverted to the kernel. */
    kni_set_bitmap(tcp_ports, tcp_port_bitmap);
    kni_set_bitmap(udp_ports, udp_port_bitmap);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Per-port exception-path setup.  In the primary process this
 * allocates the per-port stats block and creates the exception device:
 * either an rte_kni device ("vethN") or, for KNI_TYPE_VIRTIO, a
 * hot-plugged virtio_user vdev backed by /dev/vhost-net.  Every
 * process then creates or looks up the per-port "kni_ring_N" used to
 * pass packets toward the kernel.  Panics/exits on any failure.
 */
void
ff_kni_alloc(uint16_t port_id, unsigned socket_id, int type, int port_idx,
    struct rte_mempool *mbuf_pool, unsigned ring_queue_size)
{
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        struct rte_eth_dev_info dev_info;
        struct rte_ether_addr addr = {{0}};
        int ret;

        kni_stat[port_id] = (struct kni_interface_stats*)rte_zmalloc(
            "kni:stat_lcore",
            sizeof(struct kni_interface_stats),
            RTE_CACHE_LINE_SIZE);

        if (kni_stat[port_id] == NULL) {
            rte_panic("rte_zmalloc kni_interface_stats failed\n");
        }

        kni_stat[port_id]->rx_packets = 0;
        kni_stat[port_id]->rx_dropped = 0;
        kni_stat[port_id]->tx_packets = 0;
        kni_stat[port_id]->tx_dropped = 0;

        memset(&dev_info, 0, sizeof(dev_info));
        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0) {
            rte_panic("kni get dev info %u failed!\n", port_id);
        }

        /* Get the interface default mac address */
        rte_eth_macaddr_get(port_id,
            (struct rte_ether_addr *)&addr);

        printf("ff_kni_alloc get Port %u MAC:"RTE_ETHER_ADDR_PRT_FMT"\n",
            (unsigned)port_id, RTE_ETHER_ADDR_BYTES(&addr));

#ifdef FF_KNI_KNI
        if (type == KNI_TYPE_KNI) {
            struct rte_kni_conf conf;
            struct rte_kni_ops ops;

            /* only support one kni */
            memset(&conf, 0, sizeof(conf));
            snprintf(conf.name, RTE_KNI_NAMESIZE, "veth%u", port_id);
            conf.core_id = rte_lcore_id();
            conf.force_bind = 1;
            conf.group_id = port_id;
            uint16_t mtu;
            rte_eth_dev_get_mtu(port_id, &mtu);
            /* mbufs must fit MTU plus Ethernet header and FCS. */
            conf.mbuf_size = mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE;
            /* Mirror the physical port's MAC onto the veth device. */
            rte_memcpy(&conf.addr, addr.addr_bytes, RTE_ETHER_ADDR_LEN);

            /* Kernel-request callbacks (MTU, link state, MAC). */
            memset(&ops, 0, sizeof(ops));
            ops.port_id = port_id;
            ops.change_mtu = kni_change_mtu;
            ops.config_network_if = kni_config_network_interface;
            ops.config_mac_address = kni_config_mac_address;

            kni_stat[port_id]->kni = rte_kni_alloc(mbuf_pool, &conf, &ops);
            if (kni_stat[port_id]->kni == NULL)
                rte_panic("create kni on port %u failed!\n", port_id);
            else
                printf("create kni on port %u success!\n", port_id);

            kni_stat[port_id]->port_id = port_id;
        } else if (type == KNI_TYPE_VIRTIO)
#endif
        {
            /*
             * to add virtio port for exception path(KNI),
             * see https://doc.dpdk.org/guides/howto/virtio_user_as_exception_path.html#virtio-user-as-exception-path
             */
            char port_name[32];
            char port_args[256];

            /* set the name and arguments */
            snprintf(port_name, sizeof(port_name), "virtio_user%u", port_id);
            snprintf(port_args, sizeof(port_args),
                "path=/dev/vhost-net,queues=1,queue_size=%u,iface=veth%d,mac=" RTE_ETHER_ADDR_PRT_FMT,
                ring_queue_size, port_id, RTE_ETHER_ADDR_BYTES(&addr));
            printf("ff_kni_alloc to rte_eal_hotplug_add virtio user port, portname:%s, portargs:%s\n",
                port_name, port_args);

            /* add the vdev for virtio_user */
            if (rte_eal_hotplug_add("vdev", port_name, port_args) < 0) {
                rte_exit(EXIT_FAILURE, "ff_kni_alloc cannot create virtio user paired port for port %u\n", port_id);
            }

            /* Hot-plugged vdevs get port ids after the physical devices;
             * nb_dev_ports is presumably the physical port count — confirm
             * against its definition elsewhere in the project. */
            kni_stat[port_id]->port_id = port_idx + nb_dev_ports;
        }
    }

    char ring_name[RTE_KNI_NAMESIZE];
    snprintf((char*)ring_name, RTE_KNI_NAMESIZE, "kni_ring_%u", port_id);

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        /* Single-consumer: only the port's owner drains this ring. */
        kni_rp[port_id] = rte_ring_create(ring_name, ring_queue_size,
            socket_id, RING_F_SC_DEQ);

        if (rte_ring_lookup(ring_name) != kni_rp[port_id])
            rte_panic("lookup kni ring failed!\n");
    } else {
        /* Secondary processes attach to the ring created by the primary. */
        kni_rp[port_id] = rte_ring_lookup(ring_name);
    }

    if (kni_rp[port_id] == NULL)
        rte_panic("create kni ring failed!\n");

    printf("create kni ring success, %u ring entries are now free!\n",
        rte_ring_free_count(kni_rp[port_id]));
}
|
|
|
|
|
|
|
|
|
|
/*
 * Run one iteration of the exception path for (port_id, queue_id):
 * forward packets queued for the kernel, then drain kernel-originated
 * packets back out of the NIC.  `pkts_burst`/`count` describe the
 * scratch mbuf array both directions reuse.
 */
void
ff_kni_process(uint16_t port_id, uint16_t queue_id,
    struct rte_mbuf **pkts_burst, unsigned count)
{
    /* Ring -> kernel direction first... */
    kni_process_tx(port_id, queue_id, pkts_burst, count);
    /* ...then kernel -> NIC, reusing the same burst buffer. */
    kni_process_rx(port_id, queue_id, pkts_burst, count);
}
|
|
|
|
|
|
|
|
|
|
/* enqueue the packet, and own it */
|
|
|
|
|
/* enqueue the packet, and own it */
/*
 * Queue `pkt` on the port's kernel-bound ring.  Ownership of the mbuf
 * transfers to this function: on any failure (rate limit exceeded or
 * ring full) the packet is freed and -1 is returned; on success it
 * stays alive in the ring and 0 is returned.  FILTER_ARP and above
 * count against the console rate limit, everything else against the
 * general limit.
 */
int
ff_kni_enqueue(enum FilterReturn filter, uint16_t port_id, struct rte_mbuf *pkt)
{
    if (filter >= FILTER_ARP) {
        if (ff_global_cfg.kni.console_packets_ratelimit) {
            kni_rate_limt.console_packets++;
            if (kni_rate_limt.console_packets > ff_global_cfg.kni.console_packets_ratelimit) {
                goto error;
            }
        }
    } else {
        if (ff_global_cfg.kni.general_packets_ratelimit) {
            /* NOTE(review): 'gerneal_packets' (sic) is the field name
             * declared in struct kni_ratelimit; renaming it would be an
             * interface change. */
            kni_rate_limt.gerneal_packets++;
            if (kni_rate_limt.gerneal_packets > ff_global_cfg.kni.general_packets_ratelimit) {
                goto error;
            }
        }
    }

    int ret = rte_ring_enqueue(kni_rp[port_id], pkt);
    if (ret < 0) {
        goto error;
    }

    return 0;

error:
    /* Drop path: the caller handed us ownership, so free here. */
    rte_pktmbuf_free(pkt);

    return -1;
}
|
|
|
|
|
|