f-stack/dpdk/drivers/net/bonding/rte_eth_bond_api.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <string.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_tcp.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

int
check_for_bonding_ethdev(const struct rte_eth_dev *eth_dev)
{
	/* Check valid pointer */
	if (eth_dev == NULL ||
			eth_dev->device == NULL ||
			eth_dev->device->driver == NULL ||
			eth_dev->device->driver->name == NULL)
		return -1;

	/* return 0 if driver name matches */
	return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
}

int
valid_bonding_port_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return check_for_bonding_ethdev(&rte_eth_devices[port_id]);
}

int
check_for_main_bonding_ethdev(const struct rte_eth_dev *eth_dev)
{
	int i;
	struct bond_dev_private *internals;

	if (check_for_bonding_ethdev(eth_dev) != 0)
		return 0;

	internals = eth_dev->data->dev_private;

	/* Check if any of the member devices is itself a bonding device */
	for (i = 0; i < internals->member_count; i++)
		if (valid_bonding_port_id(internals->members[i].port_id) == 0)
			return 1;

	return 0;
}

int
valid_member_port_id(struct bond_dev_private *internals, uint16_t member_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(member_port_id, -1);

	/* Verify that member_port_id refers to a non-bonding port */
	if (check_for_bonding_ethdev(&rte_eth_devices[member_port_id]) == 0 &&
			internals->mode == BONDING_MODE_8023AD) {
		RTE_BOND_LOG(ERR, "Cannot add member to bonding device in 802.3ad"
				" mode as member is also a bonding device; only"
				" physical devices are supported in this mode.");
		return -1;
	}

	if (internals->port_id == member_port_id) {
		RTE_BOND_LOG(ERR,
			"Cannot add the bonding device itself as its member.");
		return -1;
	}

	return 0;
}

void
activate_member(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t active_count = internals->active_member_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_member(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {
		internals->tlb_members_order[active_count] = port_id;
	}

	RTE_ASSERT(internals->active_member_count <
			(RTE_DIM(internals->active_members) - 1));

	internals->active_members[internals->active_member_count] = port_id;
	internals->active_member_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_member(internals);

	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}

void
deactivate_member(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
	uint16_t member_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t active_count = internals->active_member_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_member(eth_dev, port_id);
	} else if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB)
		bond_tlb_disable(internals);

	member_pos = find_member_by_id(internals->active_members, active_count,
			port_id);

	/*
	 * If the member was not at the end of the list, shift the remaining
	 * active members up the array.
	 */
	if (member_pos < active_count) {
		active_count--;
		memmove(internals->active_members + member_pos,
				internals->active_members + member_pos + 1,
				(active_count - member_pos) *
					sizeof(internals->active_members[0]));
	}

	RTE_ASSERT(active_count < RTE_DIM(internals->active_members));
	internals->active_member_count = active_count;

	if (eth_dev->data->dev_started) {
		if (internals->mode == BONDING_MODE_8023AD) {
			bond_mode_8023ad_start(eth_dev);
		} else if (internals->mode == BONDING_MODE_TLB) {
			bond_tlb_enable(internals);
		} else if (internals->mode == BONDING_MODE_ALB) {
			bond_tlb_enable(internals);
			bond_mode_alb_client_list_upd(eth_dev);
		}
	}
}

int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct bond_dev_private *internals;
	struct rte_eth_dev *bond_dev;
	char devargs[52];
	int ret;

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		return -EINVAL;
	}

	ret = snprintf(devargs, sizeof(devargs),
		"driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
	if (ret < 0 || ret >= (int)sizeof(devargs))
		return -ENOMEM;

	ret = rte_vdev_init(name, devargs);
	if (ret)
		return ret;

	bond_dev = rte_eth_dev_get_by_name(name);
	RTE_ASSERT(bond_dev);

	/*
	 * To make bond_ethdev_configure() happy we need to free the
	 * internals->kvlist here.
	 *
	 * Also see comment in bond_ethdev_configure().
	 */
	internals = bond_dev->data->dev_private;
	rte_kvargs_free(internals->kvlist);
	internals->kvlist = NULL;

	return bond_dev->data->port_id;
}

int
rte_eth_bond_free(const char *name)
{
	return rte_vdev_uninit(name);
}
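
/*
 * Usage sketch (illustrative, not part of this file): typical lifecycle of a
 * bonded device created through this API. The device name and mode below are
 * assumptions for the example.
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	if (bond_port < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to create bonded device\n");
 *	... configure the port, add members, start, use ...
 *	rte_eth_bond_free("net_bonding0");
 */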

static int
member_vlan_filter_set(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	int found;
	int res = 0;
	uint64_t slab = 0;
	uint32_t pos = 0;
	uint16_t first;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	if ((bonding_eth_dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
		return 0;

	internals = bonding_eth_dev->data->dev_private;
	found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
	first = pos;

	if (!found)
		return 0;
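
	/*
	 * Walk the VLAN bitmap slab by slab, wrapping around until the scan
	 * returns to the first slab, and program every VLAN ID found on the
	 * new member port.
	 */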
	do {
		uint32_t i;
		uint64_t mask;

		for (i = 0, mask = 1;
		     i < RTE_BITMAP_SLAB_BIT_SIZE;
		     i++, mask <<= 1) {
			if (unlikely(slab & mask)) {
				uint16_t vlan_id = pos + i;

				res = rte_eth_dev_vlan_filter(member_port_id,
						vlan_id, 1);
			}
		}
		found = rte_bitmap_scan(internals->vlan_filter_bmp,
				&pos, &slab);
	} while (found && first != pos && res == 0);

	return res;
}

static int
member_rte_flow_prepare(uint16_t member_id, struct bond_dev_private *internals)
{
	struct rte_flow *flow;
	struct rte_flow_error ferror;
	uint16_t member_port_id = internals->members[member_id].port_id;

	if (internals->flow_isolated_valid != 0) {
		if (rte_eth_dev_stop(member_port_id) != 0) {
			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
				     member_port_id);
			return -1;
		}

		if (rte_flow_isolate(member_port_id, internals->flow_isolated,
				&ferror)) {
			RTE_BOND_LOG(ERR, "rte_flow_isolate failed for member"
				     " %d: %s", member_id, ferror.message ?
				     ferror.message : "(no stated reason)");
			return -1;
		}
	}

	TAILQ_FOREACH(flow, &internals->flow_list, next) {
		flow->flows[member_id] = rte_flow_create(member_port_id,
							 flow->rule.attr,
							 flow->rule.pattern,
							 flow->rule.actions,
							 &ferror);
		if (flow->flows[member_id] == NULL) {
			RTE_BOND_LOG(ERR, "Cannot create flow for member"
				     " %d: %s", member_id,
				     ferror.message ? ferror.message :
				     "(no stated reason)");

			/* Destroy successful bond flows from the member */
			TAILQ_FOREACH(flow, &internals->flow_list, next) {
				if (flow->flows[member_id] != NULL) {
					rte_flow_destroy(member_port_id,
							 flow->flows[member_id],
							 &ferror);
					flow->flows[member_id] = NULL;
				}
			}
			return -1;
		}
	}
	return 0;
}

static void
eth_bond_member_inherit_dev_info_rx_first(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;

	internals->reta_size = di->reta_size;
	internals->rss_key_len = di->hash_key_size;

	/* Inherit Rx offload capabilities from the first member device */
	internals->rx_offload_capa = di->rx_offload_capa;
	internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
	internals->flow_type_rss_offloads = di->flow_type_rss_offloads;

	/* Inherit maximum Rx packet size from the first member device */
	internals->candidate_max_rx_pktlen = di->max_rx_pktlen;

	/* Inherit default Rx queue settings from the first member device */
	memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));

	/*
	 * Turn off descriptor prefetch and writeback by default for all
	 * member devices. Applications may tweak this setting if need be.
	 */
	rxconf_i->rx_thresh.pthresh = 0;
	rxconf_i->rx_thresh.hthresh = 0;
	rxconf_i->rx_thresh.wthresh = 0;

	/* Setting this to zero should effectively enable default values */
	rxconf_i->rx_free_thresh = 0;

	/* Disable deferred start by default for all member devices */
	rxconf_i->rx_deferred_start = 0;
}

static void
eth_bond_member_inherit_dev_info_tx_first(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_txconf *txconf_i = &internals->default_txconf;

	/* Inherit Tx offload capabilities from the first member device */
	internals->tx_offload_capa = di->tx_offload_capa;
	internals->tx_queue_offload_capa = di->tx_queue_offload_capa;

	/* Inherit default Tx queue settings from the first member device */
	memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));

	/*
	 * Turn off descriptor prefetch and writeback by default for all
	 * member devices. Applications may tweak this setting if need be.
	 */
	txconf_i->tx_thresh.pthresh = 0;
	txconf_i->tx_thresh.hthresh = 0;
	txconf_i->tx_thresh.wthresh = 0;

	/*
	 * Setting these parameters to zero assumes that default
	 * values will be configured implicitly by member devices.
	 */
	txconf_i->tx_free_thresh = 0;
	txconf_i->tx_rs_thresh = 0;

	/* Disable deferred start by default for all member devices */
	txconf_i->tx_deferred_start = 0;
}

static void
eth_bond_member_inherit_dev_info_rx_next(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
	const struct rte_eth_rxconf *rxconf = &di->default_rxconf;

	internals->rx_offload_capa &= di->rx_offload_capa;
	internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
	internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;

	/*
	 * If at least one member device suggests enabling this
	 * setting by default, enable it for all member devices
	 * since disabling it may not necessarily be supported.
	 */
	if (rxconf->rx_drop_en == 1)
		rxconf_i->rx_drop_en = 1;

	/*
	 * Adding a new member device may cause some of the previously
	 * inherited offloads to be withdrawn from the internal
	 * rx_queue_offload_capa value. Thus, the new internal value of
	 * default Rx queue offloads has to be masked by
	 * rx_queue_offload_capa to make sure that only commonly supported
	 * offloads are preserved from both the previous value and the value
	 * being inherited from the new member device.
	 */
	rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
			     internals->rx_queue_offload_capa;

	/*
	 * The RETA size is the GCD of all the members' RETA sizes; if every
	 * size is a power of 2, the smallest one is the GCD.
	 */
	if (internals->reta_size > di->reta_size)
		internals->reta_size = di->reta_size;

	if (internals->rss_key_len > di->hash_key_size) {
		RTE_BOND_LOG(WARNING, "member has different RSS key size, "
				"configuring RSS may fail");
		internals->rss_key_len = di->hash_key_size;
	}

	if (!internals->max_rx_pktlen &&
	    di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
		internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
}

static void
eth_bond_member_inherit_dev_info_tx_next(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
	const struct rte_eth_txconf *txconf = &di->default_txconf;

	internals->tx_offload_capa &= di->tx_offload_capa;
	internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;

	/*
	 * Adding a new member device may cause some of the previously
	 * inherited offloads to be withdrawn from the internal
	 * tx_queue_offload_capa value. Thus, the new internal value of
	 * default Tx queue offloads has to be masked by
	 * tx_queue_offload_capa to make sure that only commonly supported
	 * offloads are preserved from both the previous value and the value
	 * being inherited from the new member device.
	 */
	txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
			     internals->tx_queue_offload_capa;
}

static void
eth_bond_member_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
		const struct rte_eth_desc_lim *member_desc_lim)
{
	memcpy(bond_desc_lim, member_desc_lim, sizeof(*bond_desc_lim));
}

static int
eth_bond_member_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
		const struct rte_eth_desc_lim *member_desc_lim)
{
	bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
					member_desc_lim->nb_max);
	bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
					member_desc_lim->nb_min);
	bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
					  member_desc_lim->nb_align);

	if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
	    bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
		RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
		return -EINVAL;
	}

	/* Treat maximum number of segments equal to 0 as unspecified */
	if (member_desc_lim->nb_seg_max != 0 &&
	    (bond_desc_lim->nb_seg_max == 0 ||
	     member_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
		bond_desc_lim->nb_seg_max = member_desc_lim->nb_seg_max;

	if (member_desc_lim->nb_mtu_seg_max != 0 &&
	    (bond_desc_lim->nb_mtu_seg_max == 0 ||
	     member_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
		bond_desc_lim->nb_mtu_seg_max = member_desc_lim->nb_mtu_seg_max;

	return 0;
}

static int
__eth_bond_member_add_lock_free(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev, *member_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;
	int ret;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	member_eth_dev = &rte_eth_devices[member_port_id];
	if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDING_MEMBER) {
		RTE_BOND_LOG(ERR, "Member device is already a member of a bonding device");
		return -1;
	}

	ret = rte_eth_dev_info_get(member_port_id, &dev_info);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"%s: Error during getting device (port %u) info: %s",
			__func__, member_port_id, strerror(-ret));
		return ret;
	}

	if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
		RTE_BOND_LOG(ERR, "Member (port %u) max_rx_pktlen too small",
			     member_port_id);
		return -1;
	}

	member_add(internals, member_eth_dev);

	/*
	 * We need to store the member's reta_size so that we can synchronize
	 * RETA for all member devices even if their sizes differ.
	 */
	internals->members[internals->member_count].reta_size = dev_info.reta_size;

	if (internals->member_count < 1) {
		/*
		 * If the MAC is not user-defined then use the MAC of the
		 * first member added to the bonding device.
		 */
		if (!internals->user_defined_mac) {
			if (mac_address_set(bonding_eth_dev,
					    member_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to set MAC address");
				return -1;
			}
		}

		/* Make it the primary member */
		internals->primary_port = member_port_id;
		internals->current_primary_port = member_port_id;

		internals->speed_capa = dev_info.speed_capa;

		/* Inherit queue settings from the first member */
		internals->nb_rx_queues = member_eth_dev->data->nb_rx_queues;
		internals->nb_tx_queues = member_eth_dev->data->nb_tx_queues;

		eth_bond_member_inherit_dev_info_rx_first(internals, &dev_info);
		eth_bond_member_inherit_dev_info_tx_first(internals, &dev_info);

		eth_bond_member_inherit_desc_lim_first(&internals->rx_desc_lim,
						       &dev_info.rx_desc_lim);
		eth_bond_member_inherit_desc_lim_first(&internals->tx_desc_lim,
						       &dev_info.tx_desc_lim);
	} else {
		int ret;

		internals->speed_capa &= dev_info.speed_capa;

		eth_bond_member_inherit_dev_info_rx_next(internals, &dev_info);
		eth_bond_member_inherit_dev_info_tx_next(internals, &dev_info);

		ret = eth_bond_member_inherit_desc_lim_next(&internals->rx_desc_lim,
							    &dev_info.rx_desc_lim);
		if (ret != 0)
			return ret;

		ret = eth_bond_member_inherit_desc_lim_next(&internals->tx_desc_lim,
							    &dev_info.tx_desc_lim);
		if (ret != 0)
			return ret;
	}

	/* Bond modes Broadcast & 802.3AD don't support MBUF_FAST_FREE offload. */
	if (internals->mode == BONDING_MODE_8023AD ||
	    internals->mode == BONDING_MODE_BROADCAST)
		internals->tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	bonding_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
			internals->flow_type_rss_offloads;

	if (member_rte_flow_prepare(internals->member_count, internals) != 0) {
		RTE_BOND_LOG(ERR, "Failed to prepare new member flows: port=%d",
			     member_port_id);
		return -1;
	}

	/* Add additional MAC addresses to the member */
	if (member_add_mac_addresses(bonding_eth_dev, member_port_id) != 0) {
		RTE_BOND_LOG(ERR, "Failed to add mac address(es) to member %hu",
			     member_port_id);
		return -1;
	}

	internals->member_count++;

	if (bonding_eth_dev->data->dev_started) {
		if (member_configure(bonding_eth_dev, member_eth_dev) != 0) {
			internals->member_count--;
			RTE_BOND_LOG(ERR, "rte_bond_members_configure: port=%d",
				     member_port_id);
			return -1;
		}

		if (member_start(bonding_eth_dev, member_eth_dev) != 0) {
			internals->member_count--;
			RTE_BOND_LOG(ERR, "rte_bond_members_start: port=%d",
				     member_port_id);
			return -1;
		}
	}

	/* Update all member devices' MACs */
	mac_address_members_update(bonding_eth_dev);

	/*
	 * Register link status change callback with bonding device pointer as
	 * argument.
	 */
	rte_eth_dev_callback_register(member_port_id, RTE_ETH_EVENT_INTR_LSC,
		bond_ethdev_lsc_event_callback, &bonding_eth_dev->data->port_id);

	/*
	 * If the bonding device is started then we can add the member to our
	 * active member array.
	 */
	if (bonding_eth_dev->data->dev_started) {
		ret = rte_eth_link_get_nowait(member_port_id, &link_props);
		if (ret < 0) {
			rte_eth_dev_callback_unregister(member_port_id,
					RTE_ETH_EVENT_INTR_LSC,
					bond_ethdev_lsc_event_callback,
					&bonding_eth_dev->data->port_id);
			internals->member_count--;
			RTE_BOND_LOG(ERR,
				"Member (port %u) link get failed: %s",
				member_port_id, rte_strerror(-ret));
			return -1;
		}

		if (link_props.link_status == RTE_ETH_LINK_UP) {
			if (internals->active_member_count == 0 &&
			    !internals->user_defined_primary_port)
				bond_ethdev_primary_set(internals,
							member_port_id);
		}
	}

	/* Mark the member port as a bonding member */
	member_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDING_MEMBER;

	member_vlan_filter_set(bonding_port_id, member_port_id);

	return 0;
}

int
rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	rte_spinlock_lock(&internals->lock);
	retval = __eth_bond_member_add_lock_free(bonding_port_id, member_port_id);
	rte_spinlock_unlock(&internals->lock);

	return retval;
}
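
/*
 * Usage sketch (illustrative): attaching two existing ports to a bonded
 * device. The port IDs are assumptions for the example; each call returns a
 * negative value if the port is invalid or already a bonding member.
 *
 *	if (rte_eth_bond_member_add(bond_port, port0) != 0 ||
 *	    rte_eth_bond_member_add(bond_port, port1) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to add bonding members\n");
 */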

static int
__eth_bond_member_remove_lock_free(uint16_t bonding_port_id,
		uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_dev *member_eth_dev;
	struct rte_flow_error flow_error;
	struct rte_flow *flow;
	int i, member_idx;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) < 0)
		return -1;

	/* First remove the member from the active member list */
	member_idx = find_member_by_id(internals->active_members,
		internals->active_member_count, member_port_id);

	if (member_idx < internals->active_member_count)
		deactivate_member(bonding_eth_dev, member_port_id);

	member_idx = -1;
	/* Now find the member in the member list */
	for (i = 0; i < internals->member_count; i++)
		if (internals->members[i].port_id == member_port_id) {
			member_idx = i;
			break;
		}

	if (member_idx < 0) {
		RTE_BOND_LOG(ERR, "Could not find member in port list, member count %u",
			     internals->member_count);
		return -1;
	}

	/*
	 * Unregister the link status change callback registered with the
	 * bonding device pointer as argument.
	 */
	rte_eth_dev_callback_unregister(member_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&rte_eth_devices[bonding_port_id].data->port_id);

	/* Restore the original MAC address of the member device */
	rte_eth_dev_default_mac_addr_set(member_port_id,
			&internals->members[member_idx].persisted_mac_addr);

	/* Remove additional MAC addresses from the member */
	member_remove_mac_addresses(bonding_eth_dev, member_port_id);

	/*
	 * Remove bond device flows from the member device.
	 * Note: don't restore flow isolate mode.
	 */
	TAILQ_FOREACH(flow, &internals->flow_list, next) {
		if (flow->flows[member_idx] != NULL) {
			rte_flow_destroy(member_port_id, flow->flows[member_idx],
					 &flow_error);
			flow->flows[member_idx] = NULL;
		}
	}

	/* Remove the dedicated queues flow */
	if (internals->mode == BONDING_MODE_8023AD &&
	    internals->mode4.dedicated_queues.enabled == 1 &&
	    internals->mode4.dedicated_queues.flow[member_port_id] != NULL) {
		rte_flow_destroy(member_port_id,
				internals->mode4.dedicated_queues.flow[member_port_id],
				&flow_error);
		internals->mode4.dedicated_queues.flow[member_port_id] = NULL;
	}

	member_eth_dev = &rte_eth_devices[member_port_id];
	member_remove(internals, member_eth_dev);
	member_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDING_MEMBER);

	/*
	 * The first member in the active list becomes the primary by default;
	 * otherwise use the first device in the member list.
	 */
	if (internals->current_primary_port == member_port_id) {
		if (internals->active_member_count > 0)
			internals->current_primary_port = internals->active_members[0];
		else if (internals->member_count > 0)
			internals->current_primary_port = internals->members[0].port_id;
		else
			internals->primary_port = 0;
		mac_address_members_update(bonding_eth_dev);
	}

	if (internals->active_member_count < 1) {
		/*
		 * If no members remain attached to the bonding device and the
		 * MAC is not user-defined, clear the MAC of the bonding
		 * device; it will be set again when a new member is added.
		 */
		if (internals->member_count < 1 && !internals->user_defined_mac)
			memset(rte_eth_devices[bonding_port_id].data->mac_addrs, 0,
				sizeof(*rte_eth_devices[bonding_port_id].data->mac_addrs));
	}

	if (internals->member_count == 0) {
		internals->rx_offload_capa = 0;
		internals->tx_offload_capa = 0;
		internals->rx_queue_offload_capa = 0;
		internals->tx_queue_offload_capa = 0;
		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
		internals->reta_size = 0;
		internals->candidate_max_rx_pktlen = 0;
		internals->max_rx_pktlen = 0;
	}

	return 0;
}

int
rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);
	retval = __eth_bond_member_remove_lock_free(bonding_port_id, member_port_id);
	rte_spinlock_unlock(&internals->lock);

	return retval;
}

int
rte_eth_bond_mode_set(uint16_t bonding_port_id, uint8_t mode)
{
	struct rte_eth_dev *bonding_eth_dev;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];

	if (check_for_main_bonding_ethdev(bonding_eth_dev) != 0 &&
	    mode == BONDING_MODE_8023AD)
		return -1;

	return bond_ethdev_mode_set(bonding_eth_dev, mode);
}

int
rte_eth_bond_mode_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->mode;
}

int
rte_eth_bond_primary_set(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	internals->user_defined_primary_port = 1;
	internals->primary_port = member_port_id;

	bond_ethdev_primary_set(internals, member_port_id);

	return 0;
}

int
rte_eth_bond_primary_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->member_count < 1)
		return -1;

	return internals->current_primary_port;
}

int
rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
		uint16_t len)
{
	struct bond_dev_private *internals;
	uint16_t i;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	if (members == NULL)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->member_count > len)
		return -1;

	for (i = 0; i < internals->member_count; i++)
		members[i] = internals->members[i].port_id;

	return internals->member_count;
}

int
rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
		uint16_t len)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	if (members == NULL)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->active_member_count > len)
		return -1;

	memcpy(members, internals->active_members,
		internals->active_member_count * sizeof(internals->active_members[0]));

	return internals->active_member_count;
}
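
/*
 * Usage sketch (illustrative): querying the member lists. Sizing the array
 * to RTE_MAX_ETHPORTS guarantees the length checks above cannot fail for a
 * valid bonding port.
 *
 *	uint16_t members[RTE_MAX_ETHPORTS];
 *	int n = rte_eth_bond_active_members_get(bond_port, members,
 *			RTE_DIM(members));
 *	if (n < 0)
 *		... handle invalid bonding port ...
 */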

int
rte_eth_bond_mac_address_set(uint16_t bonding_port_id,
		struct rte_ether_addr *mac_addr)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	/* Set MAC address of the bonding device */
	if (mac_address_set(bonding_eth_dev, mac_addr))
		return -1;

	internals->user_defined_mac = 1;

	/* Update all member devices' MACs */
	if (internals->member_count > 0)
		return mac_address_members_update(bonding_eth_dev);

	return 0;
}
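
/*
 * Usage sketch (illustrative): pinning a user-defined MAC on the bonding
 * port. The locally administered address below is an assumption for the
 * example; rte_eth_bond_mac_address_reset() reverts to the primary member's
 * persisted MAC.
 *
 *	struct rte_ether_addr addr = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
 *	if (rte_eth_bond_mac_address_set(bond_port, &addr) != 0)
 *		... handle error ...
 */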

int
rte_eth_bond_mac_address_reset(uint16_t bonding_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	internals->user_defined_mac = 0;

	if (internals->member_count > 0) {
		int member_port;
		/*
		 * Find the primary member's slot from the primary port
		 * number: member_add() stores members by member_count, not by
		 * primary port, so the primary is not necessarily at index 0.
		 */
		for (member_port = 0; member_port < internals->member_count;
				member_port++) {
			if (internals->members[member_port].port_id ==
					internals->primary_port)
				break;
		}

		/* Set MAC address of the bonding device */
		if (mac_address_set(bonding_eth_dev,
				&internals->members[member_port].persisted_mac_addr)
				!= 0) {
			RTE_BOND_LOG(ERR, "Failed to set MAC address on bonding device");
			return -1;
		}

		/* Update all member devices' MAC addresses */
		return mac_address_members_update(bonding_eth_dev);
	}
	/* No need to update anything as no members are present */
	return 0;
}

int
rte_eth_bond_xmit_policy_set(uint16_t bonding_port_id, uint8_t policy)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l2_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER23:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l23_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER34:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l34_hash;
		break;
	default:
		return -1;
	}

	return 0;
}
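
/*
 * Usage sketch (illustrative): selecting the layer 3+4 hash for balance
 * mode. Only the three BALANCE_XMIT_POLICY_* values handled above are
 * accepted; any other value returns -1.
 *
 *	if (rte_eth_bond_xmit_policy_set(bond_port,
 *			BALANCE_XMIT_POLICY_LAYER34) != 0)
 *		... handle error ...
 */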

int
rte_eth_bond_xmit_policy_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->balance_xmit_policy;
}

int
rte_eth_bond_link_monitoring_set(uint16_t bonding_port_id, uint32_t internal_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_status_polling_interval_ms = internal_ms;

	return 0;
}

int
rte_eth_bond_link_monitoring_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_status_polling_interval_ms;
}

int
rte_eth_bond_link_down_prop_delay_set(uint16_t bonding_port_id,
		uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_down_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_down_prop_delay_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_down_delay_ms;
}

int
rte_eth_bond_link_up_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_up_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_up_prop_delay_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_up_delay_ms;
}