mirror of https://github.com/F-Stack/f-stack.git
DPDK: upgrade to 17.11.4 LTS.
This commit is contained in:
parent f6e5885fc4
commit 579bf1e28c
@@ -155,13 +155,13 @@ F: test/test/test_bitmap.c
 
 ARM v7
 M: Jan Viktorin <viktorin@rehivetech.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
+M: Gavin Hu <gavin.hu@arm.com>
 F: lib/librte_eal/common/arch/arm/
 F: lib/librte_eal/common/include/arch/arm/
 
 ARM v8
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
+M: Gavin Hu <gavin.hu@arm.com>
 F: lib/librte_eal/common/include/arch/arm/*_64.h
 F: lib/librte_net/net_crc_neon.h
 F: lib/librte_acl/acl_run_neon.*
@@ -428,14 +428,14 @@ F: drivers/net/fm10k/
 F: doc/guides/nics/features/fm10k*.ini
 
 Mellanox mlx4
-M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+M: Matan Azrad <matan@mellanox.com>
+M: Shahaf Shuler <shahafs@mellanox.com>
 F: drivers/net/mlx4/
 F: doc/guides/nics/mlx4.rst
 F: doc/guides/nics/features/mlx4.ini
 
 Mellanox mlx5
-M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
-M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
+M: Shahaf Shuler <shahafs@mellanox.com>
 M: Yongseok Koh <yskoh@mellanox.com>
 F: drivers/net/mlx5/
 F: doc/guides/nics/mlx5.rst
@@ -446,7 +446,6 @@ M: Jacek Siuda <jck@semihalf.com>
 M: Tomasz Duszynski <tdu@semihalf.com>
 M: Dmitri Epshtein <dima@marvell.com>
 M: Natalie Samsonov <nsamsono@marvell.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
 F: drivers/net/mrvl/
 F: doc/guides/nics/mrvl.rst
 F: doc/guides/nics/features/mrvl.ini
@@ -630,7 +629,6 @@ M: Jacek Siuda <jck@semihalf.com>
 M: Tomasz Duszynski <tdu@semihalf.com>
 M: Dmitri Epshtein <dima@marvell.com>
 M: Natalie Samsonov <nsamsono@marvell.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
 F: drivers/crypto/mrvl/
 F: doc/guides/cryptodevs/mrvl.rst
 F: doc/guides/cryptodevs/features/mrvl.ini
@@ -188,7 +188,7 @@ proc_info_preparse_args(int argc, char **argv)
				proc_info_usage(prgname);
				return -1;
			}
-			strncpy(host_id, argv[i+1], sizeof(host_id));
+			snprintf(host_id, sizeof(host_id), "%s", argv[i+1]);
		}
	}
 
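
Context for this hunk (a minimal illustrative sketch, not the proc_info code): strncpy() leaves the destination without a terminating NUL whenever the source fills the buffer completely, while snprintf() always terminates and truncates:

    #include <stdio.h>

    /* Copy src into a fixed-size buffer, guaranteed NUL-terminated.
     * snprintf() truncates when src is longer than dst_len - 1 bytes;
     * strncpy(dst, src, dst_len) would leave dst unterminated instead.
     */
    static void copy_bounded(char *dst, size_t dst_len, const char *src)
    {
            snprintf(dst, dst_len, "%s", src);
    }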
@@ -507,6 +507,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset;
 
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -561,6 +562,8 @@ cperf_create_session(struct rte_mempool *sess_mp,
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
+		auth_xform.auth.iv.offset = iv_offset +
+					cipher_xform.cipher.iv.length;
 
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
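
The two hunks above set the authentication IV offset. Assuming the per-operation IV layout used by the crypto-perf app (cipher IV first, auth IV immediately after), the offsets fall out as in this illustrative comment:

    /*
     *  op private IV area:
     *  | cipher IV            | auth IV               |
     *  ^ iv_offset            ^ iv_offset + cipher_xform.cipher.iv.length
     *
     * For auth-only sessions there is no cipher IV, so the auth IV
     * starts directly at iv_offset (first hunk).
     */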
@@ -119,7 +119,7 @@ mempool_obj_init(struct rte_mempool *mp,
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
-	op->phys_addr = rte_mem_virt2phy(obj);
+	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;
 
	/* Set source buffer */
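
Why this one-line change matters (a hedged sketch, not the app code): rte_mem_virt2iova() returns the IO virtual address, which equals the physical address in IOVA-as-PA mode but the virtual address in IOVA-as-VA mode; rte_mem_virt2phy() always meant "physical" and broke the latter case:

    #include <rte_memory.h>

    /* Bus address suitable for DMA regardless of the EAL IOVA mode. */
    static rte_iova_t obj_bus_addr(const void *obj)
    {
            return rte_mem_virt2iova(obj);
    }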
@@ -534,8 +534,7 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
		if (entry == NULL)
			return -1;
 
-		memset(entry, 0, strlen(line) + 1);
-		strncpy(entry, line, strlen(line));
+		strcpy(entry, line);
 
		/* check if entry ends with , or = */
		if (entry[strlen(entry) - 1] == ','
@@ -552,8 +551,8 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
			if (entry_extended == NULL)
				goto err;
			entry = entry_extended;
-
-			strncat(entry, line, strlen(line));
+			/* entry has been allocated accordingly */
+			strcpy(&entry[strlen(entry)], line);
 
			if (entry[strlen(entry) - 1] != ',')
				break;
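
The pattern these hunks adopt, reduced to an illustrative sketch: when the buffer is allocated to exactly strlen(line) + 1 bytes, strcpy() both copies and NUL-terminates, so the earlier memset + strncpy pair (which does not copy the trailing NUL) is redundant:

    #include <stdlib.h>
    #include <string.h>

    static char *dup_line(const char *line)
    {
            char *entry = malloc(strlen(line) + 1);

            if (entry == NULL)
                    return NULL;
            strcpy(entry, line);    /* copies the trailing NUL too */
            return entry;
    }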
@@ -106,13 +106,19 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
 
	nb_lcores = rte_lcore_count() - 1;
 
-	if (enabled_cdev_count > nb_lcores) {
-		printf("Number of capable crypto devices (%d) "
-		"has to be less or equal to number of slave "
-		"cores (%d)\n", enabled_cdev_count, nb_lcores);
+	if (nb_lcores < 1) {
+		RTE_LOG(ERR, USER1,
+			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}
 
+	/*
+	 * Use less number of devices,
+	 * if there are more available than cores.
+	 */
+	if (enabled_cdev_count > nb_lcores)
+		enabled_cdev_count = nb_lcores;
+
	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;
@@ -2127,7 +2127,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
	.data = NULL,
	.help_str = "port <port_id> rxq|txq <queue_id> start|stop",
	.tokens = {
-		(void *)&cmd_config_speed_all_port,
+		(void *)&cmd_config_rxtx_queue_port,
		(void *)&cmd_config_rxtx_queue_portid,
		(void *)&cmd_config_rxtx_queue_rxtxq,
		(void *)&cmd_config_rxtx_queue_qid,
@@ -2028,7 +2028,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
	i = ctx->objdata >> 16;
	if (!strcmp_partial("end", str, len)) {
		ctx->objdata &= 0xffff;
-		return len;
+		goto end;
	}
	if (i >= ACTION_RSS_NUM)
		return -1;
@@ -2045,6 +2045,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
	if (ctx->next_num == RTE_DIM(ctx->next))
		return -1;
	ctx->next[ctx->next_num++] = next;
+end:
	if (!ctx->object)
		return len;
	((struct rte_flow_action_rss *)ctx->object)->num = i;
@@ -1624,10 +1624,12 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
 
	np.shaper_profile_id = res->shaper_profile_id;
	np.n_shared_shapers = n_shared_shapers;
-	if (np.n_shared_shapers)
+	if (np.n_shared_shapers) {
		np.shared_shaper_id = &shared_shaper_id[0];
-	else
-		np.shared_shaper_id = NULL;
+	} else {
+		free(shared_shaper_id);
+		shared_shaper_id = NULL;
+	}
 
	np.nonleaf.n_sp_priorities = res->n_sp_priorities;
	np.stats_mask = res->stats_mask;
@@ -1779,10 +1781,12 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
	np.shaper_profile_id = res->shaper_profile_id;
	np.n_shared_shapers = n_shared_shapers;
 
-	if (np.n_shared_shapers)
+	if (np.n_shared_shapers) {
		np.shared_shaper_id = &shared_shaper_id[0];
-	else
-		np.shared_shaper_id = NULL;
+	} else {
+		free(shared_shaper_id);
+		shared_shaper_id = NULL;
+	}
 
	np.leaf.cman = res->cman_mode;
	np.leaf.wred.wred_profile_id = res->wred_profile_id;
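
The leak these hunks fix, reduced to an ownership sketch (names illustrative): the shared-shaper id array is heap-allocated by the command parser, and when no shared shapers are requested nothing ever takes ownership of it, so it must be freed on that branch:

    #include <stdint.h>
    #include <stdlib.h>

    static void set_shapers(uint32_t **dst, uint32_t *buf, int n_shared)
    {
            if (n_shared) {
                    *dst = buf;     /* the node params take the buffer */
            } else {
                    free(buf);      /* nobody references it: release */
                    *dst = NULL;
            }
    }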
@@ -149,15 +149,11 @@ nic_stats_display(portid_t port_id)
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
-	portid_t pid;
 
	static const char *nic_stats_border = "########################";
 
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
-		printf("Valid port range is [0");
-		RTE_ETH_FOREACH_DEV(pid)
-			printf(", %d", pid);
-		printf("]\n");
+		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
@@ -231,13 +227,8 @@ nic_stats_display(portid_t port_id)
 void
 nic_stats_clear(portid_t port_id)
 {
-	portid_t pid;
-
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
-		printf("Valid port range is [0");
-		RTE_ETH_FOREACH_DEV(pid)
-			printf(", %d", pid);
-		printf("]\n");
+		print_valid_ports();
		return;
	}
	rte_eth_stats_reset(port_id);
@@ -314,15 +305,11 @@ nic_stats_mapping_display(portid_t port_id)
 {
	struct rte_port *port = &ports[port_id];
	uint16_t i;
-	portid_t pid;
 
	static const char *nic_stats_mapping_border = "########################";
 
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
-		printf("Valid port range is [0");
-		RTE_ETH_FOREACH_DEV(pid)
-			printf(", %d", pid);
-		printf("]\n");
+		print_valid_ports();
		return;
	}
 
@@ -434,14 +421,10 @@ port_infos_display(portid_t port_id)
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
-	portid_t pid;
	uint16_t mtu;
 
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
-		printf("Valid port range is [0");
-		RTE_ETH_FOREACH_DEV(pid)
-			printf(", %d", pid);
-		printf("]\n");
+		print_valid_ports();
		return;
	}
	port = &ports[port_id];
@@ -739,6 +722,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning)
	return 1;
 }
 
+void print_valid_ports(void)
+{
+	portid_t pid;
+
+	printf("The valid ports array is [");
+	RTE_ETH_FOREACH_DEV(pid) {
+		printf(" %d", pid);
+	}
+	printf(" ]\n");
+}
+
 static int
 vlan_id_is_invalid(uint16_t vlan_id)
 {
@@ -403,7 +403,6 @@ parse_portnuma_config(const char *q_arg)
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
-	portid_t pid;
 
	/* reset from value set at definition */
	while ((p = strchr(p0,'(')) != NULL) {
@@ -427,10 +426,7 @@ parse_portnuma_config(const char *q_arg)
		port_id = (portid_t)int_fld[FLD_PORT];
		if (port_id_is_invalid(port_id, ENABLED_WARN) ||
			port_id == (portid_t)RTE_PORT_ALL) {
-			printf("Valid port range is [0");
-			RTE_ETH_FOREACH_DEV(pid)
-				printf(", %d", pid);
-			printf("]\n");
+			print_valid_ports();
			return -1;
		}
		socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -461,7 +457,6 @@ parse_ringnuma_config(const char *q_arg)
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
-	portid_t pid;
 #define RX_RING_ONLY 0x1
 #define TX_RING_ONLY 0x2
 #define RXTX_RING 0x3
@@ -488,10 +483,7 @@ parse_ringnuma_config(const char *q_arg)
		port_id = (portid_t)int_fld[FLD_PORT];
		if (port_id_is_invalid(port_id, ENABLED_WARN) ||
			port_id == (portid_t)RTE_PORT_ALL) {
-			printf("Valid port range is [0");
-			RTE_ETH_FOREACH_DEV(pid)
-				printf(", %d", pid);
-			printf("]\n");
+			print_valid_ports();
			return -1;
		}
		socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -354,7 +354,7 @@ struct rte_fdir_conf fdir_conf = {
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
-		.vlan_tci_mask = 0x0,
+		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
@@ -880,18 +880,23 @@ init_fwd_streams(void)
 
	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
-	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
-		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
-	if (fwd_streams == NULL)
-		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
-							"failed\n", nb_fwd_streams);
+	if (nb_fwd_streams) {
+		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+			sizeof(struct fwd_stream *) * nb_fwd_streams,
+			RTE_CACHE_LINE_SIZE);
+		if (fwd_streams == NULL)
+			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
+				 " (struct fwd_stream *)) failed\n",
+				 nb_fwd_streams);
 
-	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
-		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
-				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
-		if (fwd_streams[sm_id] == NULL)
-			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
-								" failed\n");
+		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
+				" struct fwd_stream", sizeof(struct fwd_stream),
+				RTE_CACHE_LINE_SIZE);
+			if (fwd_streams[sm_id] == NULL)
+				rte_exit(EXIT_FAILURE, "rte_zmalloc"
+					 "(struct fwd_stream) failed\n");
		}
+	}
 
	return 0;
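
The new nb_fwd_streams guard matters because, once all ports have been detached, zero streams is a legal transient state; the old code unconditionally allocated and treated a failed (or pointless) zero-length allocation as fatal. A reduced sketch of the guarded allocation, with illustrative names:

    #include <rte_malloc.h>
    #include <rte_memory.h>

    static void *alloc_stream_array(unsigned int n, size_t elt_sz)
    {
            if (n == 0)
                    return NULL;    /* nothing to forward; not an error */
            return rte_zmalloc("fwd_streams", n * elt_sz,
                               RTE_CACHE_LINE_SIZE);
    }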
@@ -925,6 +930,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
+		} else if (nb_burst > burst_stats[1]) {
+			burst_stats[1] = nb_burst;
+			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
@@ -1210,6 +1218,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
	}
 }
 
+/*
+ * Update the forward ports list.
+ */
+void
+update_fwd_ports(portid_t new_pid)
+{
+	unsigned int i;
+	unsigned int new_nb_fwd_ports = 0;
+	int move = 0;
+
+	for (i = 0; i < nb_fwd_ports; ++i) {
+		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
+			move = 1;
+		else if (move)
+			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
+		else
+			new_nb_fwd_ports++;
+	}
+	if (new_pid < RTE_MAX_ETHPORTS)
+		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
+
+	nb_fwd_ports = new_nb_fwd_ports;
+	nb_cfg_ports = new_nb_fwd_ports;
+}
+
 /*
  * Launch packet forwarding configuration.
  */
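
update_fwd_ports() above is an in-place compaction: invalid (for example detached) port ids are dropped and the surviving entries shifted left, so the array stays dense without a scratch buffer. A generic sketch of the same idea (names illustrative, not the testpmd globals):

    #include <stdint.h>

    static unsigned int compact_ports(uint16_t *ids, unsigned int n,
                                      int (*is_invalid)(uint16_t))
    {
            unsigned int i, kept = 0;

            for (i = 0; i < n; i++)
                    if (!is_invalid(ids[i]))
                            ids[kept++] = ids[i];   /* keep, shift left */
            return kept;                            /* new element count */
    }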
@@ -1245,10 +1278,6 @@ start_packet_forwarding(int with_tx_first)
		return;
	}
 
-	if (init_fwd_streams() < 0) {
-		printf("Fail from init_fwd_streams()\n");
-		return;
-	}
-
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
@@ -1268,10 +1297,11 @@ start_packet_forwarding(int with_tx_first)
	}
	test_done = 0;
 
+	fwd_config_setup();
+
	if(!no_flush_rx)
		flush_fwd_rx_queues();
 
-	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();
@@ -1876,6 +1906,8 @@ attach_port(char *identifier)
 
	ports[pi].port_status = RTE_PORT_STOPPED;
 
+	update_fwd_ports(pi);
+
	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
 }
@@ -1902,6 +1934,8 @@ detach_port(portid_t port_id)
 
	nb_ports = rte_eth_dev_count();
 
+	update_fwd_ports(RTE_MAX_ETHPORTS);
+
	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
@@ -1995,13 +2029,16 @@ check_all_ports_link_status(uint32_t port_mask)
 static void
 rmv_event_callback(void *arg)
 {
+	int org_no_link_check = no_link_check;
	struct rte_eth_dev *dev;
	portid_t port_id = (intptr_t)arg;
 
	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
 
+	no_link_check = 1;
	stop_port(port_id);
+	no_link_check = org_no_link_check;
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
@@ -2246,7 +2283,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
	struct rte_port *port;
 
	port = &ports[slave_pid];
-	return port->slave_flag;
+	if ((rte_eth_devices[slave_pid].data->dev_flags &
+	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+		return 1;
+	return 0;
 }
 
 const uint16_t vlan_tags[] = {
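
The point of the hunk above: a port can become a bonding slave outside testpmd's own bookkeeping, so the device-level RTE_ETH_DEV_BONDED_SLAVE flag is consulted in addition to the local slave_flag. A hedged reduction of the fixed check:

    #include <rte_ethdev.h>

    /* Illustrative only: local_flag stands in for testpmd's slave_flag. */
    static int is_bonded_slave(uint16_t pid, uint8_t local_flag)
    {
            return (rte_eth_devices[pid].data->dev_flags &
                    RTE_ETH_DEV_BONDED_SLAVE) || local_flag == 1;
    }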
@@ -2257,12 +2297,14 @@ const uint16_t vlan_tags[] = {
 };
 
 static  int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
 {
	uint8_t i;
+	int32_t rc;
+	struct rte_eth_rss_conf rss_conf;
 
	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
@@ -2302,6 +2344,10 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;
 
+		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
+		if (rc != 0)
+			return rc;
+
		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;
 
@@ -2309,8 +2355,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
+
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}
 
@@ -2341,7 +2388,7 @@ init_port_dcb_config(portid_t pid,
	dcb_config = 1;
 
	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
-	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;
@@ -599,6 +599,7 @@ void fwd_config_setup(void);
 void set_def_fwd_config(void);
 void reconfig(portid_t new_port_id, unsigned socket_id);
 int init_fwd_streams(void);
+void update_fwd_ports(portid_t new_pid);
 
 void port_mtu_set(portid_t port_id, uint16_t mtu);
 void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
@@ -726,6 +727,7 @@ enum print_warning {
	DISABLED_WARN
 };
 int port_id_is_invalid(portid_t port_id, enum print_warning warning);
+void print_valid_ports(void);
 int new_socket_id(unsigned int socket_id);
 
 queueid_t get_allowed_max_nb_rxq(portid_t *pid);
@@ -41,7 +41,7 @@ HOSTAPP = dpdk-pmdinfogen
 #
 SRCS-y += pmdinfogen.c
 
-HOST_CFLAGS += $(WERROR_FLAGS) -g
+HOST_CFLAGS += $(HOST_WERROR_FLAGS) -g
 HOST_CFLAGS += -I$(RTE_OUTPUT)/include
 
 include $(RTE_SDK)/mk/rte.hostapp.mk
@@ -46,11 +46,6 @@ CONFIG_RTE_TOOLCHAIN_ICC=y
 #
 CONFIG_RTE_LIBRTE_KNI=n
 
-#
-# Vectorized PMD is not supported on 32-bit
-#
-CONFIG_RTE_IXGBE_INC_VECTOR=n
-
 #
 # Solarflare PMD is not supported on 32-bit
 #
@@ -252,6 +252,35 @@ Generic Flow API is supported. The baseline support is:
 More features may be added in future firmware and new versions of the VIC.
 Please refer to the release notes.
 
+Ingress VLAN Rewrite
+--------------------
+
+VIC adapters can tag, untag, or modify the VLAN headers of ingress
+packets. The ingress VLAN rewrite mode controls this behavior. By
+default, it is set to pass-through, where the NIC does not modify the
+VLAN header in any way so that the application can see the original
+header. This mode is sufficient for many applications, but may not be
+suitable for others. Such applications may change the mode by setting
+``devargs`` parameter ``ig-vlan-rewrite`` to one of the following.
+
+- ``pass``: Pass-through mode. The NIC does not modify the VLAN
+  header. This is the default mode.
+
+- ``priority``: Priority-tag default VLAN mode. If the ingress packet
+  is tagged with the default VLAN, the NIC replaces its VLAN header
+  with the priority tag (VLAN ID 0).
+
+- ``trunk``: Default trunk mode. The NIC tags untagged ingress packets
+  with the default VLAN. Tagged ingress packets are not modified. To
+  the application, every packet appears as tagged.
+
+- ``untag``: Untag default VLAN mode. If the ingress packet is tagged
+  with the default VLAN, the NIC removes or untags its VLAN header so
+  that the application sees an untagged packet. As a result, the
+  default VLAN becomes `untagged`. This mode can be useful for
+  applications such as OVS-DPDK performance benchmarks that utilize
+  only the default VLAN and want to see only untagged packets.
+
 .. _enic_limitations:
 
 Limitations
@@ -267,9 +296,10 @@ Limitations
   In test setups where an Ethernet port of a Cisco adapter in TRUNK mode is
   connected point-to-point to another adapter port or connected though a router
   instead of a switch, all ingress packets will be VLAN tagged. Programs such
-  as l3fwd which do not account for VLAN tags in packets will misbehave. The
-  solution is to enable VLAN stripping on ingress. The follow code fragment is
-  example of how to accomplish this:
+  as l3fwd may not account for VLAN tags in packets and may misbehave. One
+  solution is to enable VLAN stripping on ingress so the VLAN tag is removed
+  from the packet and put into the mbuf->vlan_tci field. Here is an example
+  of how to accomplish this:
 
 .. code-block:: console
 
@@ -277,6 +307,14 @@ Limitations
     vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
     rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
+Another alternative is modify the adapter's ingress VLAN rewrite mode so that
+packets with the default VLAN tag are stripped by the adapter and presented to
+DPDK as untagged packets. In this case mbuf->vlan_tci and the PKT_RX_VLAN and
+PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
+``devargs`` parameter ``ig-vlan-rewrite=untag``. For example::
+
+    -w 12:00.0,ig-vlan-rewrite=untag
+
 - Limited flow director support on 1200 series and 1300 series Cisco VIC
   adapters with old firmware. Please see :ref:`enic-flow-director`.
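
For completeness, a self-contained version of the two-line fragment shown in the doc hunk above (the initial get call is an assumption; the doc shows only the last two lines; DPDK 17.11 API):

    #include <rte_ethdev.h>

    static void enable_vlan_strip(uint16_t port)
    {
            int vlan_offload = rte_eth_dev_get_vlan_offload(port);

            vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
            rte_eth_dev_set_vlan_offload(port, vlan_offload);
    }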
@@ -29,6 +29,7 @@ CRC offload = Y
 VLAN offload = Y
 L3 checksum offload = Y
 L4 checksum offload = Y
+Timestamp offload = Y
 Packet type parsing = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
@@ -108,7 +108,11 @@ Limitations
 - Port statistics through software counters only. Flow statistics are
   supported by hardware counters.
 - Hardware checksum RX offloads for VXLAN inner header are not supported yet.
-- Forked secondary process not supported.
+- For secondary process:
+
+  - Forked secondary process not supported.
+  - All mempools must be initialized before rte_eth_dev_start().
+
 - Flow pattern without any specific vlan will match for vlan packets as well:
 
   When VLAN spec is not specified in the pattern, the matching rule will be created with VLAN as a wild card.
@@ -34,14 +34,14 @@ NFP poll mode driver library
 Netronome's sixth generation of flow processors pack 216 programmable
 cores and over 100 hardware accelerators that uniquely combine packet,
 flow, security and content processing in a single device that scales
-up to 400 Gbps.
+up to 400-Gb/s.
 
 This document explains how to use DPDK with the Netronome Poll Mode
 Driver (PMD) supporting Netronome's Network Flow Processor 6xxx
 (NFP-6xxx) and Netronome's Flow Processor 4xxx (NFP-4xxx).
 
 NFP is a SRIOV capable device and the PMD driver supports the physical
-function (PF) and virtual functions (VFs).
+function (PF) and the virtual functions (VFs).
 
 Dependencies
 ------------
@@ -49,17 +49,18 @@ Dependencies
 Before using the Netronome's DPDK PMD some NFP configuration,
 which is not related to DPDK, is required. The system requires
 installation of **Netronome's BSP (Board Support Package)** along
-with some specific NFP firmware application. Netronome's NSP ABI
+with a specific NFP firmware application. Netronome's NSP ABI
 version should be 0.20 or higher.
 
 If you have a NFP device you should already have the code and
-documentation for doing all this configuration. Contact
+documentation for this configuration. Contact
 **support@netronome.com** to obtain the latest available firmware.
 
-The NFP Linux netdev kernel driver for VFs is part of vanilla kernel
-since kernel version 4.5, and support for the PF since kernel version
-4.11. Support for older kernels can be obtained on Github at
-**https://github.com/Netronome/nfp-drv-kmods** along with build
+The NFP Linux netdev kernel driver for VFs has been a part of the
+vanilla kernel since kernel version 4.5, and support for the PF
+since kernel version 4.11. Support for older kernels can be obtained
+on Github at
+**https://github.com/Netronome/nfp-drv-kmods** along with the build
 instructions.
 
 NFP PMD needs to be used along with UIO ``igb_uio`` or VFIO (``vfio-pci``)
@@ -70,15 +71,15 @@ Building the software
 
 Netronome's PMD code is provided in the **drivers/net/nfp** directory.
 Although NFP PMD has Netronome´s BSP dependencies, it is possible to
-compile it along with other DPDK PMDs even if no BSP was installed before.
+compile it along with other DPDK PMDs even if no BSP was installed previously.
 Of course, a DPDK app will require such a BSP installed for using the
 NFP PMD, along with a specific NFP firmware application.
 
-Default PMD configuration is at **common_linuxapp configuration** file:
+Default PMD configuration is at the **common_linuxapp configuration** file:
 
 - **CONFIG_RTE_LIBRTE_NFP_PMD=y**
 
-Once DPDK is built all the DPDK apps and examples include support for
+Once the DPDK is built all the DPDK apps and examples include support for
 the NFP PMD.
 
@@ -91,18 +92,18 @@ for details.
 Using the PF
 ------------
 
-NFP PMD has support for using the NFP PF as another DPDK port, but it does not
+NFP PMD supports using the NFP PF as another DPDK port, but it does not
 have any functionality for controlling VFs. In fact, it is not possible to use
 the PMD with the VFs if the PF is being used by DPDK, that is, with the NFP PF
-bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK version will
+bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK versions will
 have a PMD able to work with the PF and VFs at the same time and with the PF
 implementing VF management along with other PF-only functionalities/offloads.
 
-The PMD PF has extra work to do which will delay the DPDK app initialization
-like checking if a firmware is already available in the device, uploading the
-firmware if necessary, and configure the Link state properly when starting or
-stopping a PF port. Note that firmware upload is not always necessary which is
-the main delay for NFP PF PMD initialization.
+The PMD PF has extra work to do which will delay the DPDK app initialization.
+This additional effort could be checking if a firmware is already available in
+the device, uploading the firmware if necessary or configuring the Link state
+properly when starting or stopping a PF port. Note that firmware upload is not
+always necessary which is the main delay for NFP PF PMD initialization.
 
 Depending on the Netronome product installed in the system, firmware files
 should be available under ``/lib/firmware/netronome``. DPDK PMD supporting the
@@ -114,14 +115,14 @@ PF multiport support
 --------------------
 
 Some NFP cards support several physical ports with just one single PCI device.
-DPDK core is designed with the 1:1 relationship between PCI devices and DPDK
+The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK
 ports, so NFP PMD PF support requires handling the multiport case specifically.
 During NFP PF initialization, the PMD will extract the information about the
 number of PF ports from the firmware and will create as many DPDK ports as
 needed.
 
 Because the unusual relationship between a single PCI device and several DPDK
-ports, there are some limitations when using more than one PF DPDK ports: there
+ports, there are some limitations when using more than one PF DPDK port: there
 is no support for RX interrupts and it is not possible either to use those PF
 ports with the device hotplug functionality.
 
@@ -136,7 +137,7 @@ System configuration
    get the drivers from the above Github repository and follow the instructions
    for building and installing it.
 
-   Virtual Functions need to be enabled before they can be used with the PMD.
+   VFs need to be enabled before they can be used with the PMD.
    Before enabling the VFs it is useful to obtain information about the
    current NFP PCI device detected by the system:
@@ -86,12 +86,13 @@ Prerequisites
   `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
   For downloading firmware file, select adapter category, model and DPDK Poll Mode Driver.
 
-- Requires management firmware (MFW) version **8.30.x.x** or higher to be
-  flashed on to the adapter. If the required management firmware is not
-  available then download from
-  `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
-  For downloading firmware upgrade utility, select adapter category, model and Linux distro.
-  To flash the management firmware refer to the instructions in the QLogic Firmware Upgrade Utility Readme document.
+- Requires the NIC be updated minimally with **8.30.x.x** Management firmware(MFW) version supported for that NIC.
+  It is highly recommended that the NIC be updated with the latest available management firmware version to get latest feature set.
+  Management Firmware and Firmware Upgrade Utility for Cavium FastLinQ(r) branded adapters can be downloaded from
+  `Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+  For downloading Firmware Upgrade Utility, select NIC category, model and Linux distro.
+  To update the management firmware, refer to the instructions in the Firmware Upgrade Utility Readme document.
+  For OEM branded adapters please follow the instruction provided by the OEM to update the Management Firmware on the NIC.
 
 - SR-IOV requires Linux PF driver version **8.20.x.x** or higher.
   If the required PF driver is not available then download it from
@@ -1090,3 +1090,375 @@ Fixes in 17.11 LTS Release
 * vhost: handle virtually non-contiguous buffers in Rx-mrg (fixes CVE-2018-1059)
 * vhost: handle virtually non-contiguous buffers in Tx (fixes CVE-2018-1059)
 * vhost: introduce safe API for GPA translation (fixes CVE-2018-1059)
+
+17.11.3
+~~~~~~~
+
+* app/crypto-perf: check minimum lcore number
+* app/crypto-perf: fix excess crypto device error
+* app/crypto-perf: fix IOVA translation
+* app/crypto-perf: fix parameters copy
+* app/crypto-perf: use strcpy for allocated string
+* app/procinfo: fix strncpy usage in args parsing
+* app/testpmd: fix burst stats reporting
+* app/testpmd: fix command token
+* app/testpmd: fix empty list of RSS queues for flow
+* app/testpmd: fix forward ports Rx flush
+* app/testpmd: fix forward ports update
+* app/testpmd: fix removed device link status asking
+* app/testpmd: fix slave port detection
+* app/testpmd: fix synchronic port hotplug
+* app/testpmd: fix valid ports prints
+* bus/dpaa: fix resource leak
+* bus/fslmc: fix find device start condition
+* bus/pci: fix find device implementation
+* bus/vdev: fix finding device by name
+* cryptodev: fix supported size check
+* crypto/dpaa2_sec: fix HMAC supported digest sizes
+* crypto/scheduler: fix 64-bit mask of workers cores
+* crypto/scheduler: fix memory leak
+* crypto/scheduler: fix multicore rings re-use
+* crypto/scheduler: fix possible duplicated ring names
+* crypto/scheduler: set null pointer after freeing
+* crypto/zuc: batch ops with same transform
+* crypto/zuc: do not set default op status
+* doc: add timestamp offload to mlx5 features
+* doc: fix NFP NIC guide grammar
+* drivers/net: fix link autoneg value for virtual PMDs
+* eal/ppc: remove braces in SMP memory barrier macro
+* ethdev: fix port accessing after release
+* ethdev: fix queue start
+* event/dpaa2: remove link from info structure
+* examples/exception_path: limit core count to 64
+* examples/l2fwd-crypto: fix the default aead assignments
+* examples/performance-thread: fix return type of threads
+* examples/quota_watermark: fix return type of threads
+* hash: fix missing spinlock unlock in add key
+* ip_frag: fix double free of chained mbufs
+* kni: fix build on CentOS 7.4
+* kni: fix build on RHEL 7.5
+* mbuf: fix Tx checksum offload API doc
+* mbuf: improve tunnel Tx offloads API doc
+* mem: do not use physical addresses in IOVA as VA mode
+* mempool: fix leak when no objects are populated
+* mempool: fix virtual address population
+* mk: fix make defconfig on FreeBSD
+* net: add IPv6 header fields macros
+* net/bnx2x: do not cast function pointers as a policy
+* net/bnx2x: fix for PCI FLR after ungraceful exit
+* net/bnx2x: fix KR2 device check
+* net/bnx2x: fix memzone name overrun
+* net/bnxt: avoid invalid vnic id in set L2 Rx mask
+* net/bnxt: fix endianness of flag
+* net/bnxt: fix license header
+* net/bnxt: fix LRO disable
+* net/bnxt: fix Rx checksum flags
+* net/bnxt: fix Rx checksum flags for tunnel frames
+* net/bnxt: fix Rx drop setting
+* net/bnxt: fix Rx mbuf and agg ring leak in dev stop
+* net/bnxt: fix usage of vnic id
+* net/bnxt: free memory allocated for VF filters
+* net/bnxt: set padding flags in Rx descriptor
+* net/bonding: clear started state if start fails
+* net/bonding: export mode 4 slave info routine
+* net/bonding: fix primary slave port id storage type
+* net/bonding: fix setting VLAN ID on slave ports
+* net/bonding: fix slave activation simultaneously
+* net/bonding: free mempool used in mode 6
+* net/dpaa2: fix xstats
+* net/dpaa: fix oob access
+* net/enic: allocate stats DMA buffer upfront during probe
+* net/enic: fix crash on MTU update with non-setup queues
+* net/failsafe: fix duplicate event registration
+* net/failsafe: fix probe cleanup
+* net/failsafe: fix removed sub-device cleanup
+* net/i40e: fix DDP profile DEL operation
+* net/i40e: fix failing to disable FDIR Tx queue
+* net/i40e: fix intr callback unregister by adding retry
+* net/i40e: fix link status update
+* net/i40e: fix link update no wait
+* net/i40e: fix shifts of signed values
+* net/ixgbe: enable vector PMD for icc 32 bits
+* net/ixgbe: fix busy wait during checking link status
+* net/ixgbe: fix DCB configuration
+* net/ixgbe: fix intr callback unregister by adding retry
+* net/ixgbe: fix too many interrupts
+* net/liquidio: fix link state fetching during start
+* net/mlx4: avoid constant recreations in function
+* net/mlx4: fix a typo in header file
+* net/mlx4: fix broadcast Rx
+* net/mlx4: fix removal detection of stopped port
+* net/mlx4: fix RSS resource leak in case of error
+* net/mlx4: fix Rx resource leak in case of error
+* net/mlx4: fix single port configuration
+* net/mlx4: fix UDP flow rule limitation enforcement
+* net/mlx4: store RSS hash result in mbufs
+* net/mlx5: add data-plane debug message macro
+* net/mlx5: add missing function documentation
+* net/mlx5: add packet type index for TCP ack
+* net/mlx5: change device reference for secondary process
+* net/mlx5: change non failing function return values
+* net/mlx5: change pkt burst select function prototype
+* net/mlx5: change tunnel flow priority
+* net/mlx5: enforce RSS key length limitation
+* net/mlx5: fix allocation when no memory on device NUMA node
+* net/mlx5: fix build with clang on ARM
+* net/mlx5: fix calculation of Tx TSO inline room size
+* net/mlx5: fix close after start failure
+* net/mlx5: fix count in xstats
+* net/mlx5: fix CRC strip capability query
+* net/mlx5: fix disabling Tx packet inlining
+* net/mlx5: fix double free on error handling
+* net/mlx5: fix ethtool link setting call order
+* net/mlx5: fix existing file removal
+* net/mlx5: fix flow creation with a single target queue
+* net/mlx5: fix flow director conversion
+* net/mlx5: fix flow director drop rule deletion crash
+* net/mlx5: fix flow director mask
+* net/mlx5: fix flow director rule deletion crash
+* net/mlx5: fix flow validation
+* net/mlx5: fix icc build
+* net/mlx5: fix invalid flow item check
+* net/mlx5: fix IPv6 header fields
+* net/mlx5: fix link status behavior
+* net/mlx5: fix link status initialization
+* net/mlx5: fix link status to use wait to complete
+* net/mlx5: fix probe return value polarity
+* net/mlx5: fix reception of multiple MAC addresses
+* net/mlx5: fix resource leak in case of error
+* net/mlx5: fix RSS flow action bounds check
+* net/mlx5: fix RSS key length query
+* net/mlx5: fix secondary process mempool registration
+* net/mlx5: fix socket connection return value
+* net/mlx5: fix sriov flag
+* net/mlx5: fix synchronization on polling Rx completions
+* net/mlx5: improve flow error explanation
+* net/mlx5: map UAR address around huge pages
+* net/mlx5: mark parameters with unused attribute
+* net/mlx5: name parameters in function prototypes
+* net/mlx5: normalize function prototypes
+* net/mlx5: prefix all functions with mlx5
+* net/mlx5: refuse empty VLAN flow specification
+* net/mlx5: remove 32-bit support
+* net/mlx5: remove assert un-accessible from secondary process
+* net/mlx5: remove control path locks
+* net/mlx5: remove excessive data prefetch
+* net/mlx5: remove get priv internal function
+* net/mlx5: remove kernel version check
+* net/mlx5: remove useless empty lines
+* net/mlx5: setup RSS regardless of queue count
+* net/mlx5: split L3/L4 in flow director
+* net/mlx5: standardize on negative errno values
+* net/mlx5: use dynamic logging
+* net/mlx5: use port id in PMD log
+* net/mlx5: warn for unsuccessful memory registration
+* net/mlx: control netdevices through ioctl only
+* net/mrvl: fix crash when port is closed without starting
+* net/mrvl: fix Rx descriptors number
+* net/nfp: fix assigning port id in mbuf
+* net/nfp: fix barrier location
+* net/nfp: fix link speed capabilities
+* net/nfp: fix mbufs releasing when stop or close
+* net/octeontx: fix null pointer dereference
+* net/octeontx: fix uninitialized speed variable
+* net/octeontx: fix uninitialized variable in port open
+* net/qede/base: fix to support OVLAN mode
+* net/qede: fix alloc from socket 0
+* net/qede: fix device stop to remove primary MAC
+* net/qede: fix L2-handles used for RSS hash update
+* net/qede: fix memory alloc for multiple port reconfig
+* net/qede: fix missing loop index in Tx SG mode
+* net/qede: fix multicast filtering
+* net/qede: fix to prevent overwriting packet type
+* net/qede: fix unicast filter routine return code
+* net/qede: fix VF port creation sequence
+* net/sfc: add missing defines for SAL annotation
+* net/sfc: add missing Rx fini on RSS setup fail path
+* net/sfc/base: fix comparison always true warning
+* net/sfc: fix mbuf data alignment calculation
+* net/sfc: fix type of opaque pointer in perf profile handler
+* net/sfc: ignore spec bits not covered by mask
+* net/sfc: process RSS settings on Rx configure step
+* net/szedata2: fix format string for PCI address
+* net/szedata2: fix total stats
+* net/tap: fix icc build
+* net/vhost: fix crash when creating vdev dynamically
+* net/vhost: fix invalid state
+* net/vhost: initialise device as inactive
+* net/vmxnet3: set the queue shared buffer at start
+* nfp: allow for non-root user
+* nfp: restore the unlink operation
+* nfp: unlink the appropriate lock file
+* pci: remove duplicated symbol from map file
+* test/distributor: fix return type of thread function
+* test: fix memory flags test for low NUMA nodes number
+* test/mempool: fix autotest retry
+* test/pipeline: fix return type of stub miss
+* test/pipeline: fix type of table entry parameter
+* test/reorder: fix freeing mbuf twice
+* vfio: do not needlessly check for IOVA mode
+* vhost: check cmsg not null
+* vhost: fix compilation issue when vhost debug enabled
+* vhost: fix dead lock on closing in server mode
+* vhost: fix device cleanup at stop
+* vhost: fix message payload union in setting ring address
+* vhost: fix offset while mmaping log base address
+* vhost: fix realloc failure
+* vhost: fix ring index returned to master on stop
+
+17.11.4
+~~~~~~~
+
+* app/crypto-perf: fix auth IV offset
+* app/testpmd: fix buffer leak in TM command
+* app/testpmd: fix DCB config
+* app/testpmd: fix VLAN TCI mask set error for FDIR
+* bitrate: add sanity check on parameters
+* bus/dpaa: fix buffer offset setting in FMAN
+* bus/dpaa: fix build
+* bus/dpaa: fix phandle support for Linux 4.16
+* bus/pci: use IOVAs check when setting IOVA mode
+* crypto/qat: fix checks for 3GPP algo bit params
+* doc: fix bonding command in testpmd
+* doc: update qede management firmware guide
+* eal: fix bitmap documentation
+* eal: fix return codes on thread naming failure
+* eal/linux: fix invalid syntax in interrupts
+* eal/linux: fix uninitialized value
+* ethdev: fix a doxygen comment for port allocation
+* ethdev: fix queue statistics mapping documentation
+* eventdev: add event buffer flush in Rx adapter
+* eventdev: fix internal port logic in Rx adapter
+* eventdev: fix missing update to Rx adaper WRR position
+* eventdev: fix port in Rx adapter internal function
+* eventdev: fix Rx SW adapter stop
+* event: fix ring init failure handling
+* event/octeontx: remove unnecessary port start and stop
+* examples/exception_path: fix out-of-bounds read
+* examples: fix strncpy error for GCC8
+* examples/flow_filtering: add flow director config for i40e
+* examples/ipsec-secgw: fix bypass rule processing
+* examples/ipsec-secgw: fix IPv4 checksum at Tx
+* examples/l2fwd-crypto: check return value on IV size check
+* examples/l2fwd-crypto: fix digest with AEAD algo
+* examples/l2fwd-crypto: skip device not supporting operation
+* examples/l3fwd: remove useless include
+* hash: fix a multi-writer race condition
+* hash: fix doxygen of return values
+* hash: fix key slot size accuracy
+* hash: fix multiwriter lock memory allocation
+* kni: fix build on RHEL 7.5
+* kni: fix build with gcc 8.1
+* kni: fix crash with null name
+* maintainers: claim maintainership for ARM v7 and v8
+* maintainers: update for Mellanox PMDs
+* mem: add function for checking memsegs IOVAs addresses
+* mem: fix max DMA maskbit size
+* mem: use address hint for mapping hugepages
+* metrics: add check for invalid key
+* metrics: disallow null as metric name
+* metrics: do not fail silently when uninitialised
+* mk: fix cross build
+* mk: fix permissions when using make install
+* mk: remove unnecessary test rules
+* mk: update targets for classified tests
+* net/bnx2x: fix FW command timeout during stop
+* net/bnx2x: fix poll link status
+* net/bnx2x: fix to set device link status
+* net/bnxt: add missing ids in xstats
+* net/bnxt: check access denied for HWRM commands
+* net/bnxt: check for invalid vNIC id
+* net/bnxt: fix filter freeing
+* net/bnxt: fix HW Tx checksum offload check
+* net/bnxt: fix lock release on NVM write failure
+* net/bnxt: fix memory leaks in NVM commands
+* net/bnxt: fix RETA size
+* net/bnxt: fix Rx ring count limitation
+* net/bnxt: fix set MTU
+* net/bnxt: fix to move a flow to a different queue
+* net/bnxt: use correct flags during VLAN configuration
+* net/bonding: always update bonding link status
+* net/bonding: do not clear active slave count
+* net/bonding: fix MAC address reset
+* net/bonding: fix race condition
+* net/cxgbe: fix init failure due to new flash parts
+* net/cxgbe: fix Rx channel map and queue type
+* net/dpaa2: remove loop for unused pool entries
+* net/ena: change memory type
+* net/ena: check pointer before memset
+* net/ena: fix GENMASK_ULL macro
+* net/ena: fix SIGFPE with 0 Rx queue
+* net/ena: set link speed as none
+* net/enic: add devarg to specify ingress VLAN rewrite mode
+* net/enic: do not overwrite admin Tx queue limit
+* net/i40e: fix check of flow director programming status
+* net/i40e: fix link speed
+* net/i40e: fix packet type parsing with DDP
+* net/i40e: fix setting TPID with AQ command
+* net/i40e: fix shifts of 32-bit value
+* net/i40e: revert fix of flow director check
+* net/i40e: workaround performance degradation
+* net/ixgbe: add support for VLAN in IP mode FDIR
+* net/ixgbe: fix mask bits register set error for FDIR
+* net/ixgbe: fix tunnel id format error for FDIR
+* net/ixgbe: fix tunnel type set error for FDIR
+* net/mlx4: check RSS queues number limitation
+* net/mlx4: fix minor resource leak during init
+* net/mlx5: add missing sanity checks for Tx completion queue
+* net/mlx5: fix assert for Tx completion queue count
+* net/mlx5: fix build with old kernels
+* net/mlx5: fix compilation for rdma-core v19
+* net/mlx5: fix crash in device probe
+* net/mlx5: fix error number handling
+* net/mlx5: fix flow search on FDIR deletion
+* net/mlx5: fix queue rollback when starting device
+* net/mlx5: fix return value when deleting fdir filter
+* net/mlx5: fix Rx buffer replenishment threshold
+* net/mlx5: fix secondary process resource leakage
+* net/mlx5: fix TCI mask filter
+* net/mlx5: preserve allmulticast flag for flow isolation mode
+* net/mlx5: preserve promiscuous flag for flow isolation mode
+* net/mvpp2: check pointer before using it
+* net/nfp: check hugepages IOVAs based on DMA mask
+* net/nfp: fix field initialization in Tx descriptor
+* net/nfp: support IOVA VA mode
+* net/octeontx: fix stop clearing Rx/Tx functions
+* net/pcap: fix multiple queues
+* net/qede/base: fix GRC attention callback
+* net/qede/base: fix to clear HW indication
+* net/qede: fix default extended VLAN offload config
+* net/qede: fix for devargs
+* net/qede: fix incorrect link status update
+* net/qede: fix interrupt handler unregister
+* net/qede: fix legacy interrupt mode
+* net/qede: fix link change event notification
+* net/qede: fix MAC address removal failure message
+* net/qede: fix ntuple filter configuration
+* net/qede: fix unicast MAC address handling in VF
+* net/qede: fix VF MTU update
+* net/qede: remove primary MAC removal
+* net/sfc: cut non VLAN ID bits from TCI
+* net/sfc: fix assert in set multicast address list
+* net/sfc: handle unknown L3 packet class in EF10 event parser
+* net/tap: fix zeroed flow mask configurations
+* net/thunderx: avoid sq door bell write on zero packet
+* net/thunderx: fix build with gcc optimization on
+* ring: fix sign conversion warning
+* security: fix crash on destroy null session
+* test/crypto: fix device id when stopping port
+* test: fix code on report
+* test: fix EAL flags autotest on FreeBSD
+* test: fix result printing
+* test: fix uninitialized port configuration
+* test/flow_classify: fix return types
+* test/hash: fix multiwriter with non consecutive cores
+* test/hash: fix potential memory leak
+* test: improve filtering
+* test: make autotest runner python 2/3 compliant
+* test: print autotest categories
+* vfio: fix PCI address comparison
+* vhost: fix missing increment of log cache count
+* vhost: flush IOTLB cache on new mem table handling
+* vhost: improve dirty pages logging performance
+* vhost: release locks on RARP packet failure
+* vhost: retranslate vring addr when memory table changes
@@ -1865,7 +1865,7 @@ Create a new bonding device::
 
 For example, to create a bonded device in mode 1 on socket 0::
 
-   testpmd> create bonded 1 0
+   testpmd> create bonded device 1 0
    created new bonded device (port X)
 
 add bonding slave
@@ -475,6 +475,7 @@ fman_if_init(const struct device_node *dpa_node)
		if (!pool_node) {
			FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
				 dname);
+			free(bpool);
			goto err;
		}
		pname = pool_node->full_name;
@@ -482,6 +483,7 @@ fman_if_init(const struct device_node *dpa_node)
		prop = of_get_property(pool_node, "fsl,bpid", &proplen);
		if (!prop) {
			FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+			free(bpool);
			goto err;
		}
		assert(proplen == sizeof(*prop));
@@ -39,6 +39,8 @@
 #include <fsl_fman_crc64.h>
 #include <fsl_bman.h>
 
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT	16
+
 /* Instantiate the global variable that the inline CRC64 implementation (in
  * <fsl_fman.h>) depends on.
  */
@@ -445,20 +447,16 @@ fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
 int
 fman_if_get_fdoff(struct fman_if *fm_if)
 {
-	u32 fmbm_ricp;
+	u32 fmbm_rebm;
	int fdoff;
-	int iceof_mask = 0x001f0000;
-	int icsz_mask = 0x0000001f;
 
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
 
	assert(fman_ccsr_map_fd != -1);
 
-	fmbm_ricp =
-		in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp);
-	/*iceof + icsz*/
-	fdoff = ((fmbm_ricp & iceof_mask) >> 16) * 16 +
-		(fmbm_ricp & icsz_mask) * 16;
+	fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+	fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
 
	return fdoff;
 }
@@ -525,12 +523,16 @@ fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
 {
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_rebm;
+	int val = 0;
+	int fmbm_mask = 0x01ff0000;
+
+	val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
 
	assert(fman_ccsr_map_fd != -1);
 
	fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
 
-	out_be32(fmbm_rebm, in_be32(fmbm_rebm) | (fd_offset << 16));
+	out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
 }
 
 void
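
The essence of the fmbm_rebm change above is a read-modify-write that clears the 9-bit buffer-margin field before inserting the new value; the old OR-only write could set bits but never clear them, so the offset could grow but never shrink. A register-free sketch (constants mirror the hunk; hardware access is abstracted away):

    #include <stdint.h>

    #define MARG_SHIFT      16
    #define MARG_MASK       (0x1ffu << MARG_SHIFT)

    /* Return the register value with the margin field replaced. */
    static uint32_t set_margin(uint32_t reg, uint32_t fd_offset)
    {
            return (reg & ~MARG_MASK) | (fd_offset << MARG_SHIFT);
    }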
@@ -215,6 +215,11 @@ linear_dir(struct dt_dir *d)
			DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
				     d->node.node.full_name);
		d->lphandle = f;
+	} else if (!strcmp(f->node.node.name, "phandle")) {
+		if (d->lphandle)
+			DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+				     d->node.node.full_name);
+		d->lphandle = f;
	} else if (!strcmp(f->node.node.name, "#address-cells")) {
		if (d->a_cells)
			DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
@@ -80,9 +80,15 @@
  */
 
 /* Required compiler attributes */
+#ifndef __maybe_unused
 #define __maybe_unused	__rte_unused
+#endif
+#ifndef __always_unused
 #define __always_unused	__rte_unused
+#endif
+#ifndef __packed
 #define __packed	__rte_packed
+#endif
 #define noinline	__attribute__((noinline))
 
 #define L1_CACHE_BYTES 64
@@ -310,8 +310,9 @@ rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
	struct rte_dpaa2_device *dev;
 
	TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
-		if (start && &dev->device == start) {
-			start = NULL; /* starting point found */
+		if (start != NULL) {
+			if (&dev->device == start)
+				start = NULL; /* starting point found */
			continue;
		}
 
@@ -43,6 +43,7 @@
 #include <rte_devargs.h>
 #include <rte_memcpy.h>
 #include <rte_vfio.h>
+#include <rte_memory.h>
 
 #include "eal_private.h"
 #include "eal_filesystem.h"
@@ -582,7 +583,6 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
 {
 #define VTD_CAP_MGAW_SHIFT	16
 #define VTD_CAP_MGAW_MASK	(0x3fULL << VTD_CAP_MGAW_SHIFT)
-#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
	struct rte_pci_addr *addr = &dev->addr;
	char filename[PATH_MAX];
	FILE *fp;
@@ -613,10 +613,12 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
	fclose(fp);
 
	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
-	if (mgaw < X86_VA_WIDTH)
-		return false;
 
-	return true;
+	if (!rte_eal_check_dma_mask(mgaw))
+		return true;
+	else
+		return false;
 }
 #elif defined(RTE_ARCH_PPC_64)
 static bool
@@ -640,13 +642,17 @@ pci_devices_iommu_support_va(void)
 {
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;
+	int iommu_dma_mask_check_done = 0;
 
	FOREACH_DRIVER_ON_PCIBUS(drv) {
		FOREACH_DEVICE_ON_PCIBUS(dev) {
			if (!rte_pci_match(drv, dev))
				continue;
-			if (!pci_one_device_iommu_support_va(dev))
-				return false;
+			if (!iommu_dma_mask_check_done) {
+				if (!pci_one_device_iommu_support_va(dev))
+					return false;
+				iommu_dma_mask_check_done = 1;
+			}
		}
	}
	return true;
@@ -671,7 +671,7 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
	vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	/* Get vfio_res */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
-		if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+		if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
			continue;
		break;
	}
@@ -488,17 +488,20 @@ static struct rte_device *
 pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
 		const void *data)
 {
-	struct rte_pci_device *dev;
+	const struct rte_pci_device *pstart;
+	struct rte_pci_device *pdev;

-	FOREACH_DEVICE_ON_PCIBUS(dev) {
-		if (start && &dev->device == start) {
-			start = NULL; /* starting point found */
-			continue;
-		}
-		if (cmp(&dev->device, data) == 0)
-			return &dev->device;
+	if (start != NULL) {
+		pstart = RTE_DEV_TO_PCI_CONST(start);
+		pdev = TAILQ_NEXT(pstart, next);
+	} else {
+		pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
+	}
+	while (pdev != NULL) {
+		if (cmp(&pdev->device, data) == 0)
+			return &pdev->device;
+		pdev = TAILQ_NEXT(pdev, next);
 	}

 	return NULL;
 }
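The rewritten lookup above no longer rescans the whole bus list looking for the `start` marker; it jumps straight to the successor of the given element. A minimal sketch of that resume-from-element pattern follows; the `struct node`/`find_from` names are hypothetical illustrations, not DPDK APIs:

#include <sys/queue.h>
#include <stddef.h>

struct node {
	int id;
	TAILQ_ENTRY(node) next;
};
TAILQ_HEAD(node_list, node);

static struct node *
find_from(struct node_list *head, struct node *start, int wanted)
{
	/* O(1) resume: successor of 'start', or the list head when no start */
	struct node *n = (start != NULL) ?
		TAILQ_NEXT(start, next) : TAILQ_FIRST(head);

	while (n != NULL) {
		if (n->id == wanted)
			return n;
		n = TAILQ_NEXT(n, next);
	}
	return NULL;	/* not found past 'start' */
}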
@@ -103,6 +103,9 @@ struct rte_pci_device {
  */
 #define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)

+#define RTE_DEV_TO_PCI_CONST(ptr) \
+	container_of(ptr, const struct rte_pci_device, device)
+
 #define RTE_ETH_DEV_TO_PCI(eth_dev)	RTE_DEV_TO_PCI((eth_dev)->device)

 /** Any PCI device identifier (vendor, device, ...) */
@@ -129,7 +129,7 @@ find_vdev(const char *name)
 	TAILQ_FOREACH(dev, &vdev_device_list, next) {
 		const char *devname = rte_vdev_device_name(dev);

-		if (!strncmp(devname, name, strlen(name)))
+		if (!strcmp(devname, name))
 			return dev;
 	}

@@ -211,9 +211,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 16,
+					.min = 1,
 					.max = 16,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -232,9 +232,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 20,
+					.min = 1,
 					.max = 20,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -253,9 +253,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 28,
+					.min = 1,
 					.max = 28,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -274,9 +274,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 32,
-					.max = 32,
-					.increment = 0
+					.min = 1,
+					.max = 32,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -295,9 +295,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 48,
+					.min = 1,
 					.max = 48,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -316,9 +316,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
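With digest_size now advertised as a 1..N range, applications can check a truncated digest length against the PMD's capabilities instead of assuming the full hash size. A sketch using the 17.11 capability-check helpers; the device id and sizes are assumptions for illustration:

#include <rte_cryptodev.h>

static int
sha256_hmac_digest_ok(uint8_t dev_id, uint16_t key_sz, uint16_t digest_sz)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.algo.auth = RTE_CRYPTO_AUTH_SHA256_HMAC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -1;	/* algorithm not supported at all */

	/* validates key/digest sizes against min/max/increment */
	return rte_cryptodev_sym_capability_check_auth(cap, key_sz,
						       digest_sz, 0);
}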
@@ -1292,9 +1292,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

 		if (unlikely(
-			(cipher_param->cipher_length % BYTE_LENGTH != 0)
-			|| (cipher_param->cipher_offset
-					% BYTE_LENGTH != 0))) {
+			(op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			(op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
 			PMD_DRV_LOG(ERR,
 		"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
 			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -1327,8 +1326,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
 			ctx->qat_hash_alg ==
 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-		if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
-				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
+		if (unlikely(
+			(op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+			(op->sym->auth.data.length % BYTE_LENGTH != 0))) {
 			PMD_DRV_LOG(ERR,
 		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
 			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -119,8 +119,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
 	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
 	uint32_t nb_caps = 0, i;

-	if (sched_ctx->capabilities)
+	if (sched_ctx->capabilities) {
 		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}

 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
 		struct rte_cryptodev_info dev_info;
@@ -490,8 +492,10 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 	sched_ctx->ops.option_set = scheduler->ops->option_set;
 	sched_ctx->ops.option_get = scheduler->ops->option_get;

-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}

 	if (sched_ctx->ops.create_private_ctx) {
 		int ret = (*sched_ctx->ops.create_private_ctx)(dev);
@@ -59,7 +59,7 @@ extern "C" {
 #endif

 /** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(RTE_MAX_LCORE - 1)

 /** Round-robin scheduling mode string */
 #define SCHEDULER_MODE_NAME_ROUND_ROBIN		round-robin
@@ -49,8 +49,8 @@ struct mc_scheduler_ctx {
 	uint32_t num_workers;    /**< Number of workers polling */
 	uint32_t stop_signal;

-	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };

 struct mc_scheduler_qp_ctx {
@@ -356,11 +356,13 @@ static int
 scheduler_create_private_ctx(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct mc_scheduler_ctx *mc_ctx;
+	struct mc_scheduler_ctx *mc_ctx = NULL;
 	uint16_t i;

-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}

 	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
 			rte_socket_id());
@@ -373,25 +375,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	for (i = 0; i < sched_ctx->nb_wc; i++) {
 		char r_name[16];

-		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-			rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+					PER_SLAVE_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_enq_ring[i]) {
+				CS_LOG_ERR("Cannot create ring for worker %u",
+					   i);
+				goto exit;
+			}
 		}
-		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-			rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+					PER_SLAVE_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_deq_ring[i]) {
+				CS_LOG_ERR("Cannot create ring for worker %u",
+					   i);
+				goto exit;
+			}
 		}
 	}

 	sched_ctx->private_ctx = (void *)mc_ctx;

 	return 0;
+
+exit:
+	for (i = 0; i < sched_ctx->nb_wc; i++) {
+		rte_ring_free(mc_ctx->sched_enq_ring[i]);
+		rte_ring_free(mc_ctx->sched_deq_ring[i]);
+	}
+	rte_free(mc_ctx);
+
+	return -1;
 }

 struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
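The hunk above switches the multicore scheduler to look an existing ring up by (device-unique) name before creating it, so rings are re-used across a device restart instead of failing on a duplicate name. A minimal sketch of that lookup-then-create idiom; the ring name and size below are illustrative, not the PMD's:

#include <rte_ring.h>
#include <rte_lcore.h>

static struct rte_ring *
get_or_create_ring(const char *name, unsigned int count)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r == NULL)	/* not found: create a fresh SP/SC ring */
		r = rte_ring_create(name, count, rte_socket_id(),
				    RING_F_SP_ENQ | RING_F_SC_DEQ);
	return r;	/* NULL only if creation also failed */
}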
@@ -362,8 +362,10 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	struct psd_scheduler_ctx *psd_ctx;

-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}

 	psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
 			rte_socket_id());
@@ -48,7 +48,8 @@ struct scheduler_init_params {
 	uint32_t nb_slaves;
 	enum rte_cryptodev_scheduler_mode mode;
 	uint32_t enable_ordering;
-	uint64_t wcmask;
+	uint16_t wc_pool[RTE_MAX_LCORE];
+	uint16_t nb_wc;
 	char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
 			[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 };
@@ -114,10 +115,6 @@ cryptodev_scheduler_create(const char *name,
 		return -EFAULT;
 	}

-	if (init_params->wcmask != 0)
-		RTE_LOG(INFO, PMD, "  workers core mask = %"PRIx64"\n",
-			init_params->wcmask);
-
 	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_crypto_scheduler_pmd_ops;

@@ -128,15 +125,12 @@ cryptodev_scheduler_create(const char *name,
 	if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
 		uint16_t i;

-		sched_ctx->nb_wc = 0;
+		sched_ctx->nb_wc = init_params->nb_wc;

-		for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
-			if (init_params->wcmask & (1ULL << i)) {
-				sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
-				RTE_LOG(INFO, PMD,
-					"  Worker core[%u]=%u added\n",
-					sched_ctx->nb_wc-1, i);
-			}
+		for (i = 0; i < sched_ctx->nb_wc; i++) {
+			sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+			RTE_LOG(INFO, PMD, "  Worker core[%u]=%u added\n",
+				i, sched_ctx->wc_pool[i]);
 		}
 	}

@@ -260,9 +254,47 @@ static int
 parse_coremask_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
+	int i, j, val;
+	uint16_t idx = 0;
+	char c;
 	struct scheduler_init_params *params = extra_args;

-	params->wcmask = strtoull(value, NULL, 16);
+	params->nb_wc = 0;
+
+	if (value == NULL)
+		return -1;
+	/* Remove all blank characters ahead and after .
+	 * Remove 0x/0X if exists.
+	 */
+	while (isblank(*value))
+		value++;
+	if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+		value += 2;
+	i = strlen(value);
+	while ((i > 0) && isblank(value[i - 1]))
+		i--;
+
+	if (i == 0)
+		return -1;
+
+	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+		c = value[i];
+		if (isxdigit(c) == 0) {
+			/* invalid characters */
+			return -1;
+		}
+		if (isdigit(c))
+			val = c - '0';
+		else if (isupper(c))
+			val = c - 'A' + 10;
+		else
+			val = c - 'a' + 10;
+
+		for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+			if ((1 << j) & val)
+				params->wc_pool[params->nb_wc++] = idx;
+		}
+	}

 	return 0;
 }
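The new parser walks the hex mask from its least-significant digit and expands every set bit into a worker core id. A self-contained sketch of the same nibble walk; `mask_to_cores` is a hypothetical helper for illustration, not the PMD code:

#include <ctype.h>
#include <string.h>

static int
mask_to_cores(const char *mask, unsigned int *cores, int max)
{
	int n = 0;
	int len = (int)strlen(mask);
	int i, j;
	unsigned int bit = 0;

	for (i = len - 1; i >= 0; i--) {	/* rightmost digit = cores 0..3 */
		char c = mask[i];
		int val;

		if (!isxdigit((unsigned char)c))
			return -1;	/* reject non-hex characters */
		val = isdigit((unsigned char)c) ? c - '0' :
			(tolower((unsigned char)c) - 'a' + 10);
		for (j = 0; j < 4; j++, bit++)
			if ((val & (1 << j)) && n < max)
				cores[n++] = bit;
	}
	return n;	/* e.g. "1e" yields cores 1, 2, 3, 4 */
}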
@@ -274,7 +306,7 @@ parse_corelist_arg(const char *key __rte_unused,
 {
 	struct scheduler_init_params *params = extra_args;

-	params->wcmask = 0ULL;
+	params->nb_wc = 0;

 	const char *token = value;

@@ -282,7 +314,11 @@ parse_corelist_arg(const char *key __rte_unused,
 		char *rval;
 		unsigned int core = strtoul(token, &rval, 10);

-		params->wcmask |= 1ULL << core;
+		if (core >= RTE_MAX_LCORE) {
+			CS_LOG_ERR("Invalid worker core %u, should be smaller "
+				   "than %u.\n", core, RTE_MAX_LCORE);
+		}
+		params->wc_pool[params->nb_wc++] = (uint16_t)core;
 		token = (const char *)rval;
 		if (token[0] == '\0')
 			break;
@@ -74,6 +74,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
 			sched_ctx->init_slave_names[i]);

 		rte_free(sched_ctx->init_slave_names[i]);
+		sched_ctx->init_slave_names[i] = NULL;

 		sched_ctx->nb_init_slaves -= 1;
 	}
@@ -289,11 +290,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
 		}
 	}

-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}

-	if (sched_ctx->capabilities)
+	if (sched_ctx->capabilities) {
 		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}

 	return 0;
 }
@@ -89,7 +89,7 @@ struct scheduler_ctx {

 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
-	uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	uint16_t wc_pool[RTE_MAX_LCORE];
 	uint16_t nb_wc;

 	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
@@ -40,7 +40,7 @@

 #include "rte_zuc_pmd_private.h"

-#define ZUC_MAX_BURST 8
+#define ZUC_MAX_BURST 4
 #define BYTE_LEN 8

 static uint8_t cryptodev_driver_id;
@@ -196,10 +196,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }

-/** Encrypt/decrypt mbufs with same cipher key. */
+/** Encrypt/decrypt mbufs. */
 static uint8_t
 process_zuc_cipher_op(struct rte_crypto_op **ops,
-		struct zuc_session *session,
+		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
 	unsigned i;
@@ -208,6 +208,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 	uint8_t *iv[ZUC_MAX_BURST];
 	uint32_t num_bytes[ZUC_MAX_BURST];
 	uint8_t *cipher_keys[ZUC_MAX_BURST];
+	struct zuc_session *sess;

 	for (i = 0; i < num_ops; i++) {
 		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)

@@ -218,6 +219,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 			break;
 		}

+		sess = sessions[i];
+
 #ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
 		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
 				(ops[i]->sym->m_dst != NULL &&

@@ -239,10 +242,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 				rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->cipher_iv_offset);
+				sess->cipher_iv_offset);
 		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

-		cipher_keys[i] = session->pKey_cipher;
+		cipher_keys[i] = sess->pKey_cipher;

 		processed_ops++;
 	}

@@ -253,10 +256,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 	return processed_ops;
 }

-/** Generate/verify hash from mbufs with same hash key. */
+/** Generate/verify hash from mbufs. */
 static int
 process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
-		struct zuc_session *session,
+		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
 	unsigned i;

@@ -265,6 +268,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 	uint32_t *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	struct zuc_session *sess;

 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */

@@ -274,17 +278,19 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}

+		sess = sessions[i];
+
 		length_in_bits = ops[i]->sym->auth.data.length;

 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->auth.data.offset >> 3);
 		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->auth_iv_offset);
+				sess->auth_iv_offset);

-		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = (uint32_t *)qp->temp_digest;

-			sso_zuc_eia3_1_buffer(session->pKey_hash,
+			sso_zuc_eia3_1_buffer(sess->pKey_hash,
 					iv, src,
 					length_in_bits, dst);
 			/* Verify digest. */

@@ -294,7 +300,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 		} else {
 			dst = (uint32_t *)ops[i]->sym->auth.digest.data;

-			sso_zuc_eia3_1_buffer(session->pKey_hash,
+			sso_zuc_eia3_1_buffer(sess->pKey_hash,
 					iv, src,
 					length_in_bits, dst);
 		}

@@ -304,33 +310,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 	return processed_ops;
 }

-/** Process a batch of crypto ops which shares the same session. */
+/** Process a batch of crypto ops which shares the same operation type. */
 static int
-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+		struct zuc_session **sessions,
 		struct zuc_qp *qp, uint8_t num_ops,
 		uint16_t *accumulated_enqueued_ops)
 {
 	unsigned i;
 	unsigned enqueued_ops, processed_ops;

-	switch (session->op) {
+	switch (op_type) {
 	case ZUC_OP_ONLY_CIPHER:
 		processed_ops = process_zuc_cipher_op(ops,
-				session, num_ops);
+				sessions, num_ops);
 		break;
 	case ZUC_OP_ONLY_AUTH:
-		processed_ops = process_zuc_hash_op(qp, ops, session,
+		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
 		break;
 	case ZUC_OP_CIPHER_AUTH:
-		processed_ops = process_zuc_cipher_op(ops, session,
+		processed_ops = process_zuc_cipher_op(ops, sessions,
 				num_ops);
-		process_zuc_hash_op(qp, ops, session, processed_ops);
+		process_zuc_hash_op(qp, ops, sessions, processed_ops);
 		break;
 	case ZUC_OP_AUTH_CIPHER:
-		processed_ops = process_zuc_hash_op(qp, ops, session,
+		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
-		process_zuc_cipher_op(ops, session, processed_ops);
+		process_zuc_cipher_op(ops, sessions, processed_ops);
 		break;
 	default:
 		/* Operation not supported. */

@@ -346,10 +353,10 @@ process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
 		ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		/* Free session if a session-less crypto op. */
 		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(session, 0, sizeof(struct zuc_session));
+			memset(sessions[i], 0, sizeof(struct zuc_session));
 			memset(ops[i]->sym->session, 0,
 					rte_cryptodev_get_header_session_size());
-			rte_mempool_put(qp->sess_mp, session);
+			rte_mempool_put(qp->sess_mp, sessions[i]);
 			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
 			ops[i]->sym->session = NULL;
 		}

@@ -370,7 +377,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
 	struct rte_crypto_op *curr_c_op;

-	struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+	struct zuc_session *curr_sess;
+	struct zuc_session *sessions[ZUC_MAX_BURST];
+	enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+	enum zuc_operation curr_zuc_op;
 	struct zuc_qp *qp = queue_pair;
 	unsigned i;
 	uint8_t burst_size = 0;

@@ -380,9 +390,6 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];

-		/* Set status as enqueued (not processed yet) by default. */
-		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
 		curr_sess = zuc_get_session(qp, curr_c_op);
 		if (unlikely(curr_sess == NULL ||
 				curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {

@@ -391,50 +398,63 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			break;
 		}

-		/* Batch ops that share the same session. */
-		if (prev_sess == NULL) {
-			prev_sess = curr_sess;
-			c_ops[burst_size++] = curr_c_op;
-		} else if (curr_sess == prev_sess) {
-			c_ops[burst_size++] = curr_c_op;
+		curr_zuc_op = curr_sess->op;
+
+		/*
+		 * Batch ops that share the same operation type
+		 * (cipher only, auth only...).
+		 */
+		if (burst_size == 0) {
+			prev_zuc_op = curr_zuc_op;
+			c_ops[0] = curr_c_op;
+			sessions[0] = curr_sess;
+			burst_size++;
+		} else if (curr_zuc_op == prev_zuc_op) {
+			c_ops[burst_size] = curr_c_op;
+			sessions[burst_size] = curr_sess;
+			burst_size++;
 			/*
 			 * When there are enough ops to process in a batch,
 			 * process them, and start a new batch.
 			 */
 			if (burst_size == ZUC_MAX_BURST) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size, &enqueued_ops);
+				processed_ops = process_ops(c_ops, curr_zuc_op,
+						sessions, qp, burst_size,
+						&enqueued_ops);
 				if (processed_ops < burst_size) {
 					burst_size = 0;
 					break;
 				}

 				burst_size = 0;
-				prev_sess = NULL;
 			}
 		} else {
 			/*
-			 * Different session, process the ops
-			 * of the previous session.
+			 * Different operation type, process the ops
+			 * of the previous type.
 			 */
-			processed_ops = process_ops(c_ops, prev_sess,
-					qp, burst_size, &enqueued_ops);
+			processed_ops = process_ops(c_ops, prev_zuc_op,
+					sessions, qp, burst_size,
+					&enqueued_ops);
 			if (processed_ops < burst_size) {
 				burst_size = 0;
 				break;
 			}

 			burst_size = 0;
-			prev_sess = curr_sess;
+			prev_zuc_op = curr_zuc_op;

-			c_ops[burst_size++] = curr_c_op;
+			c_ops[0] = curr_c_op;
+			sessions[0] = curr_sess;
+			burst_size++;
 		}
 	}

 	if (burst_size != 0) {
-		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops, prev_sess,
-				qp, burst_size, &enqueued_ops);
+		/* Process the crypto ops of the last operation type. */
+		processed_ops = process_ops(c_ops, prev_zuc_op,
+				sessions, qp, burst_size,
+				&enqueued_ops);
 	}

 	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
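The reworked ZUC enqueue path above batches consecutive ops that share an operation type and flushes a batch when it fills or the type changes. A distilled sketch of that grouping loop; `req`, `op_type` and `flush` are hypothetical stand-ins for the PMD's crypto op, session op and process_ops():

#include <stddef.h>

enum op_type { OP_CIPHER, OP_AUTH };
struct req { enum op_type type; /* ... payload ... */ };

void flush(struct req **batch, size_t n);	/* process one homogeneous batch */

static void
enqueue(struct req **reqs, size_t n, size_t max_burst)
{
	struct req *batch[16];	/* max_burst must be <= 16 here */
	size_t burst = 0, i;
	enum op_type prev = OP_CIPHER;

	for (i = 0; i < n; i++) {
		if (burst == 0 || reqs[i]->type == prev) {
			prev = reqs[i]->type;
			batch[burst++] = reqs[i];
			if (burst == max_burst) {	/* batch full: flush */
				flush(batch, burst);
				burst = 0;
			}
		} else {
			flush(batch, burst);	/* type changed: flush old */
			burst = 0;
			prev = reqs[i]->type;
			batch[burst++] = reqs[i];
		}
	}
	if (burst != 0)
		flush(batch, burst);	/* trailing partial batch */
}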
@@ -489,7 +489,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
 		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
 					0, dpaa2_portal->dpio_dev->token,
 			evq_info->dpcon->dpcon_id);
-		evq_info->link = 0;
 	}

 	return (int)nb_unlinks;

@@ -510,8 +509,6 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,

 	for (i = 0; i < nb_links; i++) {
 		evq_info = &priv->evq_info[queues[i]];
-		if (evq_info->link)
-			continue;

 		ret = dpio_add_static_dequeue_channel(
 				dpaa2_portal->dpio_dev->dpio,

@@ -526,7 +523,6 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
 				channel_index, 1);
 		evq_info->dpcon->channel_index = channel_index;
-		evq_info->link = 1;
 	}

 	RTE_SET_USED(priorities);

@@ -540,7 +536,6 @@ err:
 		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
 					0, dpaa2_portal->dpio_dev->token,
 			evq_info->dpcon->dpcon_id);
-		evq_info->link = 0;
 	}
 	return ret;
 }
@@ -100,7 +100,6 @@ struct evq_info_t {
 	struct dpaa2_dpci_dev *dpci;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
-	uint8_t link;
 };

 struct dpaa2_eventdev {
@@ -485,14 +485,9 @@ static int
 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
 					const struct rte_eth_dev *eth_dev)
 {
-	int ret;
-	const struct octeontx_nic *nic = eth_dev->data->dev_private;
 	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);

-	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
-	if (ret)
-		return 0;
-	octeontx_pki_port_start(nic->port_id);
 	return 0;
 }

@@ -501,14 +496,9 @@ static int
 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
 				const struct rte_eth_dev *eth_dev)
 {
-	int ret;
-	const struct octeontx_nic *nic = eth_dev->data->dev_private;
 	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);

-	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
-	if (ret)
-		return 0;
-	octeontx_pki_port_stop(nic->port_id);
 	return 0;
 }
@@ -124,7 +124,7 @@ static struct rte_eth_link pmd_link = {
 	.link_speed = ETH_SPEED_NUM_10G,
 	.link_duplex = ETH_LINK_FULL_DUPLEX,
 	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_autoneg = ETH_LINK_FIXED,
 };

 static uint16_t
@@ -125,7 +125,6 @@ int bnx2x_nic_load(struct bnx2x_softc *sc);

 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc);
 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
 		uint8_t storm, uint16_t index, uint8_t op,
 		uint8_t update);

@@ -170,10 +169,10 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,

 	dma->sc = sc;
 	if (IS_PF(sc))
-		sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
+		snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
 			rte_get_timer_cycles());
 	else
-		sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
+		snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
 			rte_get_timer_cycles());

 	/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */

@@ -1971,9 +1970,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link

 	PMD_DRV_LOG(DEBUG, "Starting NIC unload...");

-	/* stop the periodic callout */
-	bnx2x_periodic_stop(sc);
-
 	/* mark driver as unloaded in shmem2 */
 	if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
 		val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);

@@ -4492,6 +4488,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
 	struct bnx2x_softc *sc = fp->sc;
 	uint8_t more_rx = FALSE;

+	PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
 	/* update the fastpath index */
 	bnx2x_update_fp_sb_idx(fp);

@@ -4508,7 +4506,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
 	}

 	bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
-		   le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1);
+		   le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 }

 /*
@@ -6999,16 +6997,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
 	}
 }

-static void bnx2x_periodic_start(struct bnx2x_softc *sc)
-{
-	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
-}
-
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc)
-{
-	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
-}
-
 static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
 {
 	int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);

@@ -7043,10 +7031,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
 		bnx2x_link_report(sc);
 	}

-	if (!CHIP_REV_IS_SLOW(sc)) {
-		bnx2x_periodic_start(sc);
-	}
-
 	sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
 	return rc;
 }

@@ -7078,7 +7062,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
 {
 	if ((sc->state != BNX2X_STATE_OPEN) ||
 	    (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
-		PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)",
+		PMD_DRV_LOG(INFO, "periodic callout exit (state=0x%x)",
 			    sc->state);
 		return;
 	}

@@ -8289,16 +8273,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
 			REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
 			REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
 		}
-
-		/*
-		 * Enable internal target-read (in case we are probed after PF
-		 * FLR). Must be done prior to any BAR read access. Only for
-		 * 57712 and up
-		 */
-		if (!CHIP_IS_E1x(sc)) {
-			REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
-			       1);
-		}
 	}

 	/* get the nvram size */
@@ -9675,7 +9649,17 @@ int bnx2x_attach(struct bnx2x_softc *sc)
 	bnx2x_init_rte(sc);

 	if (IS_PF(sc)) {
-		/* get device info and set params */
+		/* Enable internal target-read (in case we are probed after PF
+		 * FLR). Must be done prior to any BAR read access. Only for
+		 * 57712 and up
+		 */
+		if (!CHIP_IS_E1x(sc)) {
+			REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+			       1);
+			DELAY(200000);
+		}
+
+		/* get device info and set params */
 		if (bnx2x_get_device_info(sc) != 0) {
 			PMD_DRV_LOG(NOTICE, "getting device info");
 			return -ENXIO;

@@ -9684,7 +9668,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
 		/* get phy settings from shmem and 'and' against admin settings */
 		bnx2x_get_phy_info(sc);
 	} else {
-	/* Left mac of VF unfilled, PF should set it for VF */
+		/* Left mac of VF unfilled, PF should set it for VF */
 		memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
 	}

@@ -1930,6 +1930,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc);
 int bnx2x_complete_sp(struct bnx2x_softc *sc);
 int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
 void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+void bnx2x_periodic_stop(void *param);

 int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
 void bnx2x_vf_close(struct bnx2x_softc *sc);
@@ -13,6 +13,8 @@

 #include <rte_dev.h>
 #include <rte_ethdev_pci.h>
+#include <rte_alarm.h>
+#include <rte_atomic.h>

 /*
  * The set of PCI devices this driver supports

@@ -78,26 +80,87 @@ static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
 		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
 };

-static void
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+bnx2x_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+				  struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = link;
+	struct rte_eth_link *src = &dev->data->dev_link;
+
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+				*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+bnx2x_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+				   struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = &dev->data->dev_link;
+	struct rte_eth_link *src = link;
+
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+				*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+static int
 bnx2x_link_update(struct rte_eth_dev *dev)
 {
 	struct bnx2x_softc *sc = dev->data->dev_private;
+	struct rte_eth_link orig;
+	struct rte_eth_link link;

 	PMD_INIT_FUNC_TRACE();

 	bnx2x_link_status_update(sc);
+	memset(&orig, 0, sizeof(orig));
+	memset(&link, 0, sizeof(link));
+	bnx2x_dev_atomic_read_link_status(dev, &orig);
 	mb();
-	dev->data->dev_link.link_speed = sc->link_vars.line_speed;
+	link.link_speed = sc->link_vars.line_speed;
 	switch (sc->link_vars.duplex) {
 	case DUPLEX_FULL:
-		dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_duplex = ETH_LINK_FULL_DUPLEX;
 		break;
 	case DUPLEX_HALF:
-		dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		break;
 	}
-	dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
 			ETH_LINK_SPEED_FIXED);
-	dev->data->dev_link.link_status = sc->link_vars.link_up;
+	link.link_status = sc->link_vars.link_up;
+	bnx2x_dev_atomic_write_link_status(dev, &link);
+
+	return (link.link_status == orig.link_status) ? -1 : 0;
 }

 static void

@@ -106,8 +169,6 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	uint32_t link_status;

-	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
-
 	bnx2x_intr_legacy(sc, 0);

 	if (sc->periodic_flags & PERIODIC_GO)
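The helpers above publish the 64-bit rte_eth_link struct with a single compare-and-set. A minimal sketch of that idiom, assuming (as rte_eth_link does in DPDK 17.11) that the whole struct fits in 8 bytes; the struct and function names are illustrative:

#include <rte_atomic.h>
#include <stdint.h>

struct link64 { uint32_t speed; uint16_t flags; uint16_t status; };

static int
publish_link(volatile struct link64 *dst, const struct link64 *src)
{
	/* one-shot publish: copy the 8-byte struct as a single uint64_t;
	 * fails (-1) only if another writer raced between read and cmpset */
	if (rte_atomic64_cmpset((volatile uint64_t *)dst,
				*(const uint64_t *)dst,
				*(const uint64_t *)src) == 0)
		return -1;
	return 0;
}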
@@ -125,14 +186,73 @@ bnx2x_interrupt_handler(void *param)
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct bnx2x_softc *sc = dev->data->dev_private;

+	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+
 	bnx2x_interrupt_action(dev);
 	rte_intr_enable(&sc->pci_dev->intr_handle);
 }

+static void bnx2x_periodic_start(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct bnx2x_softc *sc = dev->data->dev_private;
+	int ret = 0;
+
+	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+	bnx2x_interrupt_action(dev);
+	if (IS_PF(sc)) {
+		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+					bnx2x_periodic_start, (void *)dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Unable to start periodic"
+					 " timer rc %d", ret);
+			assert(false && "Unable to start periodic timer");
+		}
+	}
+}
+
+void bnx2x_periodic_stop(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct bnx2x_softc *sc = dev->data->dev_private;
+
+	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
+
+	rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+}
+
 /*
  * Devops - helper functions can be called from user application
  */

+static int
+bnx2x_dev_link_update(struct rte_eth_dev *dev,
+		      __rte_unused int wait_to_complete)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return bnx2x_link_update(dev);
+}
+
+static int
+bnx2xvf_dev_link_update(struct rte_eth_dev *dev,
+			__rte_unused int wait_to_complete)
+{
+	struct bnx2x_softc *sc = dev->data->dev_private;
+	int ret = 0;
+
+	ret = bnx2x_link_update(dev);
+
+	bnx2x_check_bull(sc);
+	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+		PMD_DRV_LOG(ERR, "PF indicated channel is down."
+				 "VF device is no longer operational");
+		dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	}
+
+	return ret;
+}
+
 static int
 bnx2x_dev_configure(struct rte_eth_dev *dev)
 {
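This change replaces the driver's internal software timer with a self-rearming EAL alarm that polls slowpath/link events. A minimal sketch of that pattern; the period, log type and callback body are illustrative assumptions:

#include <rte_alarm.h>
#include <rte_log.h>

#define POLL_PERIOD_US 1000000 /* 1 second */

static void
poll_cb(void *arg)
{
	/* ... poll hardware state here ... */

	/* EAL alarms are one-shot: re-arm from inside the callback */
	if (rte_eal_alarm_set(POLL_PERIOD_US, poll_cb, arg) != 0)
		RTE_LOG(ERR, USER1, "failed to re-arm poll alarm\n");
}

static void
poll_start(void *arg)
{
	rte_eal_alarm_set(POLL_PERIOD_US, poll_cb, arg);
}

static void
poll_stop(void *arg)
{
	/* cancels every pending alarm registered with (poll_cb, arg) */
	rte_eal_alarm_cancel(poll_cb, arg);
}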
@@ -182,6 +302,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)

 	PMD_INIT_FUNC_TRACE();

+	/* start the periodic callout */
+	if (sc->periodic_flags & PERIODIC_STOP)
+		bnx2x_periodic_start(dev);
+
 	ret = bnx2x_init(sc);
 	if (ret) {
 		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);

@@ -222,12 +346,21 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
 			bnx2x_interrupt_handler, (void *)dev);
 	}

+	/* stop the periodic callout */
+	bnx2x_periodic_stop(dev);
+
 	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
 	if (ret) {
 		PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
 		return;
 	}

+	/* Update device link status */
+	if (IS_PF(sc))
+		bnx2x_dev_link_update(dev, 0);
+	else
+		bnx2xvf_dev_link_update(dev, 0);
+
 	return;
 }
@@ -299,36 +432,6 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	bnx2x_set_rx_mode(sc);
 }

-static int
-bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	int old_link_status = dev->data->dev_link.link_status;
-
-	bnx2x_link_update(dev);
-
-	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
-}
-
-static int
-bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
-{
-	int old_link_status = dev->data->dev_link.link_status;
-	struct bnx2x_softc *sc = dev->data->dev_private;
-
-	bnx2x_link_update(dev);
-
-	bnx2x_check_bull(sc);
-	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
-		PMD_DRV_LOG(ERR, "PF indicated channel is down."
-				"VF device is no longer operational");
-		dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	}
-
-	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
-}
-
 static int
 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
@@ -580,6 +683,17 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 		return ret;
 	}

+	/* schedule periodic poll for slowpath link events */
+	if (IS_PF(sc)) {
+		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+					bnx2x_periodic_start, (void *)eth_dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Unable to start periodic"
+					 " timer rc %d", ret);
+			return -EINVAL;
+		}
+	}
+
 	eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;

 	PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",

@@ -594,18 +708,20 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 	if (IS_VF(sc)) {
 		rte_spinlock_init(&sc->vf2pf_lock);

-		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
-				    &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
-				    RTE_CACHE_LINE_SIZE) != 0)
-			return -ENOMEM;
+		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
+				      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
+				      RTE_CACHE_LINE_SIZE);
+		if (ret)
+			goto out;

 		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
 					 sc->vf2pf_mbox_mapping.vaddr;

-		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
-				    &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
-				    RTE_CACHE_LINE_SIZE) != 0)
-			return -ENOMEM;
+		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
+				      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
+				      RTE_CACHE_LINE_SIZE);
+		if (ret)
+			goto out;

 		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
 					     sc->pf2vf_bulletin_mapping.vaddr;

@@ -613,10 +729,14 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
 					     sc->max_rx_queues);
 		if (ret)
-			return ret;
+			goto out;
 	}

 	return 0;
+
+out:
+	bnx2x_periodic_stop(eth_dev);
+	return ret;
 }

 static int

@@ -58,7 +58,6 @@
 #define wmb()	rte_wmb()
 #define rmb()	rte_rmb()

-
 #define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)

 #define BNX2X_MIN_RX_BUF_SIZE 1024

@@ -72,6 +71,8 @@
 /* Maximum number of Rx packets to process at a time */
 #define BNX2X_RX_BUDGET	0xffffffff

+#define BNX2X_SP_TIMER_PERIOD	US_PER_S /* 1 second */
+
 #endif

 /* MAC address operations */
@@ -4143,9 +4143,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
 	elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1);
 }

-static void elink_warpcore_config_init(struct elink_phy *phy,
-				       struct elink_params *params,
-				       struct elink_vars *vars)
+static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
+					  struct elink_params *params,
+					  struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint32_t serdes_net_if;

@@ -4222,7 +4222,7 @@ elink_warpcore_config_init(struct elink_phy *phy,
 	case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
 		if (vars->line_speed != ELINK_SPEED_20000) {
 			PMD_DRV_LOG(DEBUG, "Speed not supported yet");
-			return;
+			return 0;
 		}
 		PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
 		elink_warpcore_set_20G_DXGXS(sc, phy, lane);

@@ -4242,13 +4242,15 @@ elink_warpcore_config_init(struct elink_phy *phy,
 			PMD_DRV_LOG(DEBUG,
 				    "Unsupported Serdes Net Interface 0x%x",
 				    serdes_net_if);
-			return;
+			return 0;
 		}
 	}

 	/* Take lane out of reset after configuration is finished */
 	elink_warpcore_reset_lane(sc, phy, 0);
 	PMD_DRV_LOG(DEBUG, "Exit config init");
+
+	return 0;
 }

 static void elink_warpcore_link_reset(struct elink_phy *phy,

@@ -5226,9 +5228,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
 	return ELINK_STATUS_OK;
 }

-static elink_status_t elink_link_settings_status(struct elink_phy *phy,
-						 struct elink_params *params,
-						 struct elink_vars *vars)
+static uint8_t elink_link_settings_status(struct elink_phy *phy,
+					  struct elink_params *params,
+					  struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;

@@ -5299,9 +5301,9 @@ elink_link_settings_status(struct elink_phy *phy,
 	return rc;
 }

-static elink_status_t elink_warpcore_read_status(struct elink_phy *phy,
-						 struct elink_params *params,
-						 struct elink_vars *vars)
+static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
+					  struct elink_params *params,
+					  struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t lane;

@@ -5520,9 +5522,9 @@ static void elink_set_preemphasis(struct elink_phy *phy,
 	}
 }

-static void elink_xgxs_config_init(struct elink_phy *phy,
-				   struct elink_params *params,
-				   struct elink_vars *vars)
+static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
 			       (params->loopback_mode == ELINK_LOOPBACK_XGXS));

@@ -5567,6 +5569,8 @@ elink_xgxs_config_init(struct elink_phy *phy,

 		elink_initialize_sgmii_process(phy, params, vars);
 	}
+
+	return 0;
 }

 static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,

@@ -5751,8 +5755,8 @@ static void elink_link_int_ack(struct elink_params *params,
 	}
 }

-static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
-				       uint16_t * len)
+static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
+				uint16_t * len)
 {
 	uint8_t *str_ptr = str;
 	uint32_t mask = 0xf0000000;

@@ -5790,8 +5794,8 @@ elink_format_ver(uint32_t num, uint8_t * str,
 	return ELINK_STATUS_OK;
 }

-static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
-					    uint8_t * str, uint16_t * len)
+static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
+				     uint8_t * str, uint16_t * len)
 {
 	str[0] = '\0';
 	(*len)--;

@@ -6802,9 +6806,9 @@ static void elink_8073_specific_func(struct elink_phy *phy,
 	}
 }

-static elink_status_t elink_8073_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_8073_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint16_t val = 0, tmp1;

@@ -7097,9 +7101,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
 /******************************************************************/
 /*			BNX2X8705 PHY SECTION			  */
 /******************************************************************/
-static elink_status_t elink_8705_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     __rte_unused struct elink_vars
+static uint8_t elink_8705_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      __rte_unused struct elink_vars
 					     *vars)
 {
 	struct bnx2x_softc *sc = params->sc;

@@ -8403,9 +8407,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
 	return ELINK_STATUS_OK;
 }

-static elink_status_t elink_8706_read_status(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_8706_read_status(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	return elink_8706_8726_read_status(phy, params, vars);
 }

@@ -8477,9 +8481,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
 	return link_up;
 }

-static elink_status_t elink_8726_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_8726_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");

@@ -8684,9 +8688,9 @@ static void elink_8727_config_speed(struct elink_phy *phy,
 	}
 }

-static elink_status_t elink_8727_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     __rte_unused struct elink_vars
+static uint8_t elink_8727_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      __rte_unused struct elink_vars
 					     *vars)
 {
 	uint32_t tx_en_mode;

@@ -9291,7 +9295,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
 	return ELINK_STATUS_OK;
 }

-static elink_status_t elink_8481_config_init(struct elink_phy *phy,
+static uint8_t elink_8481_config_init(struct elink_phy *phy,
 					     struct elink_params *params,
 					     struct elink_vars *vars)
 {

@@ -9442,8 +9446,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
 	return reset_gpios;
 }

-static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
-					       struct elink_params *params)
+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
+				     struct elink_params *params)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t reset_gpios;

@@ -9471,8 +9475,6 @@ elink_84833_hw_reset_phy(struct elink_phy *phy,
 			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
 	DELAY(10);
 	PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
-
-	return ELINK_STATUS_OK;
 }

 static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,

@@ -9513,9 +9515,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
 }

 #define PHY84833_CONSTANT_LATENCY 1193
-static elink_status_t elink_848x3_config_init(struct elink_phy *phy,
-					      struct elink_params *params,
-					      struct elink_vars *vars)
+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
+				       struct elink_params *params,
+				       struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t port, initialize = 1;

@@ -9819,7 +9821,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
 	return link_up;
 }

-static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
 					     uint16_t * len)
 {
 	elink_status_t status = ELINK_STATUS_OK;

@@ -10146,9 +10148,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
 	}
 }

-static elink_status_t elink_54618se_config_init(struct elink_phy *phy,
-						struct elink_params *params,
-						struct elink_vars *vars)
+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
+					 struct elink_params *params,
+					 struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t port;

@@ -10542,9 +10544,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
 			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
 }

-static elink_status_t elink_7101_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_7101_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	uint16_t fw_ver1, fw_ver2, val;
 	struct bnx2x_softc *sc = params->sc;

@@ -10614,8 +10616,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
 	return link_up;
 }

-static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
-					    uint16_t * len)
+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
+				     uint16_t * len)
 {
 	if (*len < 5)
 		return ELINK_STATUS_ERROR;

@@ -10680,14 +10682,14 @@ static const struct elink_phy phy_null = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) NULL,
-	.read_status = (read_status_t) NULL,
-	.link_reset = (link_reset_t) NULL,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = NULL,
+	.read_status = NULL,
+	.link_reset = NULL,
+	.config_loopback = NULL,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_serdes = {

@@ -10714,14 +10716,14 @@ static const struct elink_phy phy_serdes = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_xgxs_config_init,
-	.read_status = (read_status_t) elink_link_settings_status,
-	.link_reset = (link_reset_t) elink_int_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_xgxs_config_init,
+	.read_status = elink_link_settings_status,
+	.link_reset = elink_int_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_xgxs = {

@@ -10749,14 +10751,14 @@ static const struct elink_phy phy_xgxs = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_xgxs_config_init,
-	.read_status = (read_status_t) elink_link_settings_status,
-	.link_reset = (link_reset_t) elink_int_link_reset,
-	.config_loopback = (config_loopback_t) elink_set_xgxs_loopback,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func
+	.config_init = elink_xgxs_config_init,
+	.read_status = elink_link_settings_status,
+	.link_reset = elink_int_link_reset,
+	.config_loopback = elink_set_xgxs_loopback,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = elink_xgxs_specific_func
 };

 static const struct elink_phy phy_warpcore = {

@@ -10785,14 +10787,14 @@ static const struct elink_phy phy_warpcore = {
 	.speed_cap_mask = 0,
 	/* req_duplex = */ 0,
 	/* rsrv = */ 0,
-	.config_init = (config_init_t) elink_warpcore_config_init,
-	.read_status = (read_status_t) elink_warpcore_read_status,
-	.link_reset = (link_reset_t) elink_warpcore_link_reset,
-	.config_loopback = (config_loopback_t) elink_set_warpcore_loopback,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) elink_warpcore_hw_reset,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_warpcore_config_init,
+	.read_status = elink_warpcore_read_status,
+	.link_reset = elink_warpcore_link_reset,
+	.config_loopback = elink_set_warpcore_loopback,
+	.format_fw_ver = NULL,
+	.hw_reset = elink_warpcore_hw_reset,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_7101 = {

@@ -10814,14 +10816,14 @@ static const struct elink_phy phy_7101 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_7101_config_init,
-	.read_status = (read_status_t) elink_7101_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) elink_7101_config_loopback,
-	.format_fw_ver = (format_fw_ver_t) elink_7101_format_ver,
-	.hw_reset = (hw_reset_t) elink_7101_hw_reset,
-	.set_link_led = (set_link_led_t) elink_7101_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_7101_config_init,
+	.read_status = elink_7101_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = elink_7101_config_loopback,
+	.format_fw_ver = elink_7101_format_ver,
+	.hw_reset = elink_7101_hw_reset,
+	.set_link_led = elink_7101_set_link_led,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_8073 = {

@@ -10845,14 +10847,14 @@ static const struct elink_phy phy_8073 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8073_config_init,
-	.read_status = (read_status_t) elink_8073_read_status,
-	.link_reset = (link_reset_t) elink_8073_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) elink_8073_specific_func
+	.config_init = elink_8073_config_init,
+	.read_status = elink_8073_read_status,
+	.link_reset = elink_8073_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = elink_8073_specific_func
 };

 static const struct elink_phy phy_8705 = {

@@ -10873,14 +10875,14 @@ static const struct elink_phy phy_8705 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8705_config_init,
-	.read_status = (read_status_t) elink_8705_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_null_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8705_config_init,
+	.read_status = elink_8705_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_null_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_8706 = {

@@ -10902,14 +10904,14 @@ static const struct elink_phy phy_8706 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8706_config_init,
-	.read_status = (read_status_t) elink_8706_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8706_config_init,
+	.read_status = elink_8706_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };

 static const struct elink_phy phy_8726 = {

@@ -10932,14 +10934,14 @@ static const struct elink_phy phy_8726 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8726_config_init,
-	.read_status = (read_status_t) elink_8726_read_status,
|
||||
.link_reset = (link_reset_t) elink_8726_link_reset,
|
||||
.config_loopback = (config_loopback_t) elink_8726_config_loopback,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_format_ver,
|
||||
.hw_reset = (hw_reset_t) NULL,
|
||||
.set_link_led = (set_link_led_t) NULL,
|
||||
.phy_specific_func = (phy_specific_func_t) NULL
|
||||
.config_init = elink_8726_config_init,
|
||||
.read_status = elink_8726_read_status,
|
||||
.link_reset = elink_8726_link_reset,
|
||||
.config_loopback = elink_8726_config_loopback,
|
||||
.format_fw_ver = elink_format_ver,
|
||||
.hw_reset = NULL,
|
||||
.set_link_led = NULL,
|
||||
.phy_specific_func = NULL
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_8727 = {
|
||||
|
@ -10961,14 +10963,14 @@ static const struct elink_phy phy_8727 = {
|
|||
.speed_cap_mask = 0,
|
||||
.req_duplex = 0,
|
||||
.rsrv = 0,
|
||||
.config_init = (config_init_t) elink_8727_config_init,
|
||||
.read_status = (read_status_t) elink_8727_read_status,
|
||||
.link_reset = (link_reset_t) elink_8727_link_reset,
|
||||
.config_loopback = (config_loopback_t) NULL,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_format_ver,
|
||||
.hw_reset = (hw_reset_t) elink_8727_hw_reset,
|
||||
.set_link_led = (set_link_led_t) elink_8727_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) elink_8727_specific_func
|
||||
.config_init = elink_8727_config_init,
|
||||
.read_status = elink_8727_read_status,
|
||||
.link_reset = elink_8727_link_reset,
|
||||
.config_loopback = NULL,
|
||||
.format_fw_ver = elink_format_ver,
|
||||
.hw_reset = elink_8727_hw_reset,
|
||||
.set_link_led = elink_8727_set_link_led,
|
||||
.phy_specific_func = elink_8727_specific_func
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_8481 = {
|
||||
|
@ -10996,14 +10998,14 @@ static const struct elink_phy phy_8481 = {
|
|||
.speed_cap_mask = 0,
|
||||
.req_duplex = 0,
|
||||
.rsrv = 0,
|
||||
.config_init = (config_init_t) elink_8481_config_init,
|
||||
.read_status = (read_status_t) elink_848xx_read_status,
|
||||
.link_reset = (link_reset_t) elink_8481_link_reset,
|
||||
.config_loopback = (config_loopback_t) NULL,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
|
||||
.hw_reset = (hw_reset_t) elink_8481_hw_reset,
|
||||
.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) NULL
|
||||
.config_init = elink_8481_config_init,
|
||||
.read_status = elink_848xx_read_status,
|
||||
.link_reset = elink_8481_link_reset,
|
||||
.config_loopback = NULL,
|
||||
.format_fw_ver = elink_848xx_format_ver,
|
||||
.hw_reset = elink_8481_hw_reset,
|
||||
.set_link_led = elink_848xx_set_link_led,
|
||||
.phy_specific_func = NULL
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_84823 = {
|
||||
|
@ -11031,14 +11033,14 @@ static const struct elink_phy phy_84823 = {
|
|||
.speed_cap_mask = 0,
|
||||
.req_duplex = 0,
|
||||
.rsrv = 0,
|
||||
.config_init = (config_init_t) elink_848x3_config_init,
|
||||
.read_status = (read_status_t) elink_848xx_read_status,
|
||||
.link_reset = (link_reset_t) elink_848x3_link_reset,
|
||||
.config_loopback = (config_loopback_t) NULL,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
|
||||
.hw_reset = (hw_reset_t) NULL,
|
||||
.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
|
||||
.config_init = elink_848x3_config_init,
|
||||
.read_status = elink_848xx_read_status,
|
||||
.link_reset = elink_848x3_link_reset,
|
||||
.config_loopback = NULL,
|
||||
.format_fw_ver = elink_848xx_format_ver,
|
||||
.hw_reset = NULL,
|
||||
.set_link_led = elink_848xx_set_link_led,
|
||||
.phy_specific_func = elink_848xx_specific_func
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_84833 = {
|
||||
|
@ -11065,14 +11067,14 @@ static const struct elink_phy phy_84833 = {
|
|||
.speed_cap_mask = 0,
|
||||
.req_duplex = 0,
|
||||
.rsrv = 0,
|
||||
.config_init = (config_init_t) elink_848x3_config_init,
|
||||
.read_status = (read_status_t) elink_848xx_read_status,
|
||||
.link_reset = (link_reset_t) elink_848x3_link_reset,
|
||||
.config_loopback = (config_loopback_t) NULL,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
|
||||
.hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
|
||||
.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
|
||||
.config_init = elink_848x3_config_init,
|
||||
.read_status = elink_848xx_read_status,
|
||||
.link_reset = elink_848x3_link_reset,
|
||||
.config_loopback = NULL,
|
||||
.format_fw_ver = elink_848xx_format_ver,
|
||||
.hw_reset = elink_84833_hw_reset_phy,
|
||||
.set_link_led = elink_848xx_set_link_led,
|
||||
.phy_specific_func = elink_848xx_specific_func
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_84834 = {
|
||||
|
@ -11098,14 +11100,14 @@ static const struct elink_phy phy_84834 = {
|
|||
.speed_cap_mask = 0,
|
||||
.req_duplex = 0,
|
||||
.rsrv = 0,
|
||||
.config_init = (config_init_t) elink_848x3_config_init,
|
||||
.read_status = (read_status_t) elink_848xx_read_status,
|
||||
.link_reset = (link_reset_t) elink_848x3_link_reset,
|
||||
.config_loopback = (config_loopback_t) NULL,
|
||||
.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
|
||||
.hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
|
||||
.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
|
||||
.config_init = elink_848x3_config_init,
|
||||
.read_status = elink_848xx_read_status,
|
||||
.link_reset = elink_848x3_link_reset,
|
||||
.config_loopback = NULL,
|
||||
.format_fw_ver = elink_848xx_format_ver,
|
||||
.hw_reset = elink_84833_hw_reset_phy,
|
||||
.set_link_led = elink_848xx_set_link_led,
|
||||
.phy_specific_func = elink_848xx_specific_func
|
||||
};
|
||||
|
||||
static const struct elink_phy phy_54618se = {
|
||||
|
@ -11131,14 +11133,14 @@ static const struct elink_phy phy_54618se = {
|
|||
.speed_cap_mask = 0,
|
||||
/* req_duplex = */ 0,
|
||||
/* rsrv = */ 0,
|
||||
.config_init = (config_init_t) elink_54618se_config_init,
|
||||
.read_status = (read_status_t) elink_54618se_read_status,
|
||||
.link_reset = (link_reset_t) elink_54618se_link_reset,
|
||||
.config_loopback = (config_loopback_t) elink_54618se_config_loopback,
|
||||
.format_fw_ver = (format_fw_ver_t) NULL,
|
||||
.hw_reset = (hw_reset_t) NULL,
|
||||
.set_link_led = (set_link_led_t) elink_5461x_set_link_led,
|
||||
.phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func
|
||||
.config_init = elink_54618se_config_init,
|
||||
.read_status = elink_54618se_read_status,
|
||||
.link_reset = elink_54618se_link_reset,
|
||||
.config_loopback = elink_54618se_config_loopback,
|
||||
.format_fw_ver = NULL,
|
||||
.hw_reset = NULL,
|
||||
.set_link_led = elink_5461x_set_link_led,
|
||||
.phy_specific_func = elink_54618se_specific_func
|
||||
};
|
||||
|
||||
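Note on the hunks above: every callback initializer loses its explicit cast. Calling through a function pointer cast to an incompatible type is undefined behavior in C (C11 6.3.2.3p8); once the callbacks' prototypes match the typedefs, the casts are unnecessary and only hide mismatches. A minimal standalone sketch of the corrected pattern, with hypothetical names (not elink code):

#include <stddef.h>

typedef int (*config_init_t)(void *phy, void *params);

/* Matches config_init_t exactly, so no cast is needed (or wanted). */
static int demo_config_init(void *phy, void *params)
{
	(void)phy;
	(void)params;
	return 0;
}

struct phy_ops {
	config_init_t config_init;
	config_init_t config_loopback;
};

static const struct phy_ops demo_ops = {
	.config_init = demo_config_init,
	.config_loopback = NULL,	/* plain NULL needs no cast either */
};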
/*****************************************************************/

@@ -12919,7 +12921,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
((next_page & 0xe0) == 0x2))));
((next_page & 0xe0) == 0x20))));

/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
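The one-character fix above matters: next_page & 0xe0 can only produce values whose low five bits are clear, so comparing the result against 0x2 was always false and the workaround never matched. A quick standalone check of the arithmetic (illustrative register value):

#include <assert.h>

int main(void)
{
	unsigned int next_page = 0x25;	/* example value with bit 5 set */

	/* Masking with 0xe0 keeps bits 7:5, giving 0x00, 0x20, ... 0xe0. */
	assert((next_page & 0xe0) != 0x2);	/* 0x2 is unreachable */
	assert((next_page & 0xe0) == 0x20);	/* the intended comparison */
	return 0;
}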
@@ -248,6 +248,17 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;

vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
if (!vnic->fw_grp_ids) {
RTE_LOG(ERR, PMD,
"Failed to alloc %d bytes for group ids\n",
size);
rc = -ENOMEM;
goto err_out;
}
memset(vnic->fw_grp_ids, -1, size);

rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
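The memset(-1) above relies on all-ones being the invalid-ID sentinel: setting every byte to 0xff makes each 16-bit group ID 0xffff. A standalone sketch of that pattern (hypothetical array, not the driver struct):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint16_t fw_grp_ids[4];

	memset(fw_grp_ids, -1, sizeof(fw_grp_ids));	/* every byte -> 0xff */
	assert(fw_grp_ids[0] == 0xffff);	/* all-ones "invalid" marker */
	return 0;
}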
@@ -400,10 +411,6 @@ static int bnxt_init_nic(struct bnxt *bp)
bnxt_init_vnics(bp);
bnxt_init_filters(bp);

rc = bnxt_init_chip(bp);
if (rc)
return rc;

return 0;
}

@@ -433,7 +440,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bp->max_rsscos_ctx;
dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;

@@ -465,7 +472,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
.wthresh = 0,
},
.rx_free_thresh = 32,
.rx_drop_en = 0,
/* If no descriptors available, pkts are dropped by default */
.rx_drop_en = 1,
};

dev_info->default_txconf = (struct rte_eth_txconf) {

@@ -572,7 +580,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
}
bp->dev_stopped = 0;

rc = bnxt_init_nic(bp);
rc = bnxt_init_chip(bp);
if (rc)
goto error;

@@ -631,6 +639,8 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}

@@ -642,8 +652,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);

bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);

@@ -1271,9 +1279,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;

/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {

@@ -1320,8 +1328,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
new_filter->l2_ovlan = vlan_id;
new_filter->l2_ovlan_mask = 0xF000;
new_filter->l2_ivlan = vlan_id;
new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,

@@ -1544,6 +1552,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)

for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;

vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

@@ -1551,9 +1560,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (rc)
break;

rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc)
return rc;
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;

if (size < new_mtu) {
rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc)
return rc;
}
}

return rc;

@@ -3057,6 +3071,7 @@ skip_init:
goto error_free_int;

bnxt_enable_int(bp);
bnxt_init_nic(bp);

return 0;
@@ -159,6 +159,14 @@ void bnxt_free_filter_mem(struct bnxt *bp)

rte_free(bp->filter_info);
bp->filter_info = NULL;

for (i = 0; i < bp->pf.max_vfs; i++) {
STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
rte_free(filter);
STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
bnxt_filter_info, next);
}
}
}

int bnxt_alloc_filter_mem(struct bnxt *bp)

@@ -1045,9 +1053,13 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
sizeof(nf->dst_ipaddr_mask))) {
if (mf->dst_id == nf->dst_id)
return -EEXIST;
/* Same Flow, Different queue
/*
* Same Flow, Different queue
* Clear the old ntuple filter
* Reuse the matching L2 filter
* ID for the new filter
*/
nf->fw_l2_filter_id = mf->fw_l2_filter_id;
if (nf->filter_type == HWRM_CFA_EM_FILTER)
bnxt_hwrm_clear_em_filter(bp, mf);
if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -197,6 +197,10 @@ err_ret:
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
else if (rc > 0) \
rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \

@@ -218,6 +222,10 @@ err_ret:
"%s error %d\n", __func__, rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
else if (rc > 0) \
rc = -EINVAL; \
return rc; \
} \
} while (0)
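Both macro hunks above normalize firmware status codes into negative errno values before returning. The mapping in isolation, as a sketch (the RESOURCE_ACCESS_DENIED value below is illustrative, not taken from the HSI header):

#include <errno.h>

#define DEMO_ERR_RESOURCE_ACCESS_DENIED 3	/* illustrative code */

static int hwrm_status_to_errno(int rc)
{
	if (rc == DEMO_ERR_RESOURCE_ACCESS_DENIED)
		return -EACCES;	/* permission failures get a distinct errno */
	if (rc > 0)
		return -EINVAL;	/* any other positive firmware status */
	return rc;		/* 0 or an already-negative errno */
}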
@@ -252,6 +260,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t mask = 0;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return rc;

HWRM_PREP(req, CFA_L2_SET_RX_MASK);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

@@ -403,13 +414,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
req.l2_ovlan = filter->l2_ivlan;
req.l2_ivlan = filter->l2_ivlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ivlan_mask;
req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)

@@ -1089,8 +1100,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* map ring groups to this vnic */
RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;

@@ -1100,7 +1112,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_PREP(req, VNIC_ALLOC);

if (vnic->func_default)
req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
req.flags =
rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

HWRM_CHECK_RESULT();

@@ -1121,7 +1134,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,

HWRM_PREP(req, VNIC_PLCMODES_QCFG);

req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

@@ -1149,7 +1162,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,

HWRM_PREP(req, VNIC_PLCMODES_CFG);

req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);

@@ -1381,6 +1394,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

HWRM_PREP(req, VNIC_PLCMODES_CFG);

req.flags = rte_cpu_to_le_32(

@@ -1393,7 +1411,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
size -= RTE_PKTMBUF_HEADROOM;

req.jumbo_thresh = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

@@ -1424,12 +1442,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.max_agg_segs = rte_cpu_to_le_16(5);
req.max_aggs =
rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
req.min_agg_len = rte_cpu_to_le_32(512);
}
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

@@ -1794,6 +1812,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
//if (rc)
//break;
}

@@ -1881,6 +1900,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

bnxt_hwrm_vnic_free(bp, vnic);

rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);

@@ -3093,13 +3114,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

HWRM_CHECK_RESULT();
HWRM_UNLOCK();

if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);

rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();

return rc;
}

@@ -3131,12 +3151,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (rc == 0)
memcpy(data, buf, length);

rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();

return rc;
}

@@ -3167,14 +3188,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
rte_iova_t dma_handle;
uint8_t *buf;

HWRM_PREP(req, NVM_WRITE);

req.dir_type = rte_cpu_to_le_16(dir_type);
req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
req.dir_ext = rte_cpu_to_le_16(dir_ext);
req.dir_attr = rte_cpu_to_le_16(dir_attr);
req.dir_data_length = rte_cpu_to_le_32(data_len);

buf = rte_malloc("nvm_write", data_len, 0);
rte_mem_lock_page(buf);
if (!buf)

@@ -3187,14 +3200,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
}
memcpy(buf, data, data_len);

HWRM_PREP(req, NVM_WRITE);

req.dir_type = rte_cpu_to_le_16(dir_type);
req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
req.dir_ext = rte_cpu_to_le_16(dir_ext);
req.dir_attr = rte_cpu_to_le_16(dir_attr);
req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();

rte_free(buf);
return rc;
}
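A common thread in the NVRAM hunks above: HWRM_CHECK_RESULT() can return from the enclosing function early, so the DMA buffer must be copied out and freed before the status check runs, or an error path leaks it. The shape of the fix as a standalone sketch (hypothetical helper names):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

extern int fw_read(void *buf, size_t len);	/* assumed transport call */

int nvram_read(void *data, size_t len)
{
	void *buf = malloc(len);
	int rc;

	if (buf == NULL)
		return -ENOMEM;
	rc = fw_read(buf, len);
	if (rc == 0)
		memcpy(data, buf, len);	/* copy while buf is still valid */
	free(buf);	/* runs on every path, before any early return */
	return rc;
}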
@@ -1,13 +1,37 @@
/* Broadcom NetXtreme-C/E network driver.
/*-
* BSD LICENSE
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2017 Broadcom Limited
* Copyright(c) Broadcom Limited.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Broadcom Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/


#ifndef _BNXT_NVM_DEFS_H_
#define _BNXT_NVM_DEFS_H_
@@ -237,7 +237,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
for (i = 0;
i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;

@@ -247,7 +248,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
/* Free up mbufs in Agg ring */
sw_ring = rxq->rx_ring->ag_buf_ring;
if (sw_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
for (i = 0;
i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
@@ -469,11 +469,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,

if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

@@ -730,7 +734,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
if (rxq->rx_buf_use_size <= size)
size = rxq->rx_buf_use_size;

type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;

@@ -52,22 +52,36 @@
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)

#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
#define RX_CMP_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
#define RX_CMP_L4_CS_ERR_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)

#define RX_CMP_L4_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))

#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)

#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
#define RX_CMP_IP_CS_ERR_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)

#define RX_CMP_IP_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

#define RX_CMP_IP_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))

#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)

enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
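The Rx hunks above widen checksum reporting from two states to three: GOOD, BAD, and UNKNOWN when the hardware never computed a checksum (the new *_UNKNOWN macros test that the CS_CALC bits are absent, and the error macros now also cover the tunneled T_* bits). A sketch of the three-state mapping with hypothetical constants:

#include <stdint.h>

#define DEMO_CKSUM_UNKNOWN 0	/* hardware did not examine the checksum */
#define DEMO_CKSUM_GOOD    1
#define DEMO_CKSUM_BAD     2

static uint64_t cksum_state(int calculated, int error)
{
	if (!calculated)
		return DEMO_CKSUM_UNKNOWN;
	return error ? DEMO_CKSUM_BAD : DEMO_CKSUM_GOOD;
}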
@@ -296,6 +296,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count = 0;
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));

@@ -304,6 +305,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,

for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));

@@ -311,6 +313,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
}

/* The Tx drop pkts aka the Anti spoof counter */
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
@@ -161,7 +161,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,

if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
PKT_TX_TUNNEL_GENEVE))
long_bd = true;

tx_buf = &txr->tx_buf_ring[txr->tx_prod];

@@ -222,16 +224,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
PKT_TX_OIP_IIP_TCP_CKSUM) {
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
PKT_TX_OIP_IIP_UDP_CKSUM) {
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
PKT_TX_IIP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
PKT_TX_IIP_TCP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
PKT_TX_OIP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
PKT_TX_OIP_TCP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */

@@ -242,11 +274,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
PKT_TX_TCP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
PKT_TX_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
} else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;

@@ -270,6 +314,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}

txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);

txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

@@ -71,10 +71,20 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
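The Tx rewrite above replaces any-bit tests with exact-mask tests: a multi-bit request such as OIP_IIP_TCP must match only when every bit is set, otherwise a packet asking for a single offload would fall into the wrong combined branch. The difference in one standalone check (illustrative flag values):

#include <assert.h>
#include <stdint.h>

#define F_IP	(1u << 0)
#define F_TCP	(1u << 1)
#define MASK_IP_TCP	(F_IP | F_TCP)

int main(void)
{
	uint32_t flags = F_IP;	/* only IP checksum requested */

	assert((flags & MASK_IP_TCP) != 0);		/* any-bit test fires */
	assert((flags & MASK_IP_TCP) != MASK_IP_TCP);	/* exact test does not */
	return 0;
}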
@@ -67,7 +67,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
int i, j;
int i;

max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);

@@ -78,9 +78,6 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;

for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;

prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INIT(&vnic->flow_list);

@@ -43,13 +43,9 @@ struct bnxt_vnic_info {

uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t rss_rule;
#define MAX_NUM_TRAFFIC_CLASSES 8
#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
uint16_t *fw_grp_ids;
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
@@ -240,9 +240,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
for (i = 0, mask = 1;
i < RTE_BITMAP_SLAB_BIT_SIZE;
i ++, mask <<= 1) {
if (unlikely(slab & mask))
if (unlikely(slab & mask)) {
uint16_t vlan_id = pos + i;

res = rte_eth_dev_vlan_filter(slave_port_id,
(uint16_t)pos, 1);
vlan_id, 1);
}
}
found = rte_bitmap_scan(internals->vlan_filter_bmp,
&pos, &slab);
@@ -672,9 +675,21 @@ rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
internals->user_defined_mac = 0;

if (internals->slave_count > 0) {
int slave_port;
/* Find the primary slave's array slot by its port number:
* slave_add() stores slaves in slave_count order, not in
* primary-port order.
*/
for (slave_port = 0; slave_port < internals->slave_count;
slave_port++) {
if (internals->slaves[slave_port].port_id ==
internals->primary_port)
break;
}

/* Set MAC Address of Bonded Device */
if (mac_address_set(bonded_eth_dev,
&internals->slaves[internals->primary_port].persisted_mac_addr)
&internals->slaves[slave_port].persisted_mac_addr)
!= 0) {
RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
return -1;
@@ -273,7 +273,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
if (primary_slave_port_id < 0)
return -1;

*(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
*(uint16_t *)extra_args = (uint16_t)primary_slave_port_id;

return 0;
}

@@ -1912,7 +1912,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)

if (internals->slave_count == 0) {
RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
return -1;
goto out_err;
}

if (internals->user_defined_mac == 0) {

@@ -1923,19 +1923,15 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
new_mac_addr = &internals->slaves[i].persisted_mac_addr;

if (new_mac_addr == NULL)
return -1;
goto out_err;

if (mac_address_set(eth_dev, new_mac_addr) != 0) {
RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
eth_dev->data->port_id);
return -1;
goto out_err;
}
}

/* Update all slave devices MACs*/
if (mac_address_slaves_update(eth_dev) != 0)
return -1;

/* If bonded device is configured in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);

@@ -1959,7 +1955,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
"bonded port (%d) failed to reconfigure slave device (%d)",
eth_dev->data->port_id,
internals->slaves[i].port_id);
return -1;
goto out_err;
}
/* We will need to poll for link status if any slave doesn't
* support interrupts

@@ -1967,6 +1963,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->slaves[i].link_status_poll_enabled)
internals->link_status_polling_enabled = 1;
}

/* start polling if needed */
if (internals->link_status_polling_enabled) {
rte_eal_alarm_set(

@@ -1975,6 +1972,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
(void *)&rte_eth_devices[internals->port_id]);
}

/* Update all slave devices MACs*/
if (mac_address_slaves_update(eth_dev) != 0)
goto out_err;

if (internals->user_defined_primary_port)
bond_ethdev_primary_set(internals, internals->primary_port);

@@ -1986,6 +1987,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
bond_tlb_enable(internals);

return 0;

out_err:
eth_dev->data->dev_started = 0;
return -1;
}

static void

@@ -2043,7 +2048,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}

internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++)
internals->slaves[i].last_link_status = 0;

@@ -2519,6 +2523,11 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (!valid_slave)
return rc;

/* Synchronize lsc callback parallel calls either by real link event
* from the slaves PMDs or by the bonding PMD itself.
*/
rte_spinlock_lock(&internals->lsc_lock);

/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, port_id);

@@ -2526,7 +2535,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
if (active_pos < internals->active_slave_count)
return rc;
goto link_update;

/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {

@@ -2546,7 +2555,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
bond_ethdev_primary_set(internals, port_id);
} else {
if (active_pos == internals->active_slave_count)
return rc;
goto link_update;

/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);

@@ -2565,6 +2574,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
}
}

link_update:
/**
* Update bonded device link properties after any change to active
* slaves

@@ -2599,7 +2609,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
NULL, NULL);
}
}
return 0;

rte_spinlock_unlock(&internals->lsc_lock);

return rc;
}

static int

@@ -2766,6 +2779,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;

rte_spinlock_init(&internals->lock);
rte_spinlock_init(&internals->lsc_lock);

internals->port_id = eth_dev->data->port_id;
internals->mode = BONDING_MODE_INVALID;
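The new lsc_lock serializes link-state-change callbacks, whether they come from a slave PMD or from the bonding PMD itself, and the goto link_update rewrite guarantees a single unlock path. A minimal sketch of that shape (hypothetical callback, one static lock standing in for the per-bond field):

#include <stdint.h>
#include <rte_spinlock.h>

static rte_spinlock_t demo_lsc_lock = RTE_SPINLOCK_INITIALIZER;

static int demo_lsc_event(uint16_t port_id)
{
	int rc = 0;

	rte_spinlock_lock(&demo_lsc_lock);	/* one event handled at a time */
	/* ... inspect link state, activate or deactivate the slave ... */
	(void)port_id;
	rte_spinlock_unlock(&demo_lsc_lock);	/* single exit path */
	return rc;
}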
@@ -2967,6 +2981,10 @@ bond_remove(struct rte_vdev_device *dev)
eth_dev->tx_pkt_burst = NULL;

internals = eth_dev->data->dev_private;
/* Try to release the mempool used in mode 6. If the bond
* device is not in mode 6, freeing a NULL pointer is not a problem.
*/
rte_mempool_free(internals->mode6.mempool);
rte_bitmap_free(internals->vlan_filter_bmp);
rte_free(internals->vlan_filter_bmpmem);
rte_free(eth_dev->data->dev_private);

@@ -118,6 +118,7 @@ struct bond_dev_private {
uint8_t mode; /**< Link Bonding Mode */

rte_spinlock_t lock;
rte_spinlock_t lsc_lock;

uint16_t primary_port; /**< Primary Slave Port */
uint16_t current_primary_port; /**< Primary Slave Port */

@@ -1,6 +1,7 @@
DPDK_2.0 {
global:

rte_eth_bond_8023ad_slave_info;
rte_eth_bond_active_slaves_get;
rte_eth_bond_create;
rte_eth_bond_link_monitoring_set;
@@ -4242,9 +4242,8 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
* Table for non-Numonix supported flash parts. Numonix parts are left
* to the preexisting well-tested code. All flash parts have 64KB
* sectors.
* Table for non-standard supported Flash parts. Note, all Flash
* parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */

@@ -4253,7 +4252,7 @@ int t4_get_flash_params(struct adapter *adapter)
int ret;
u32 flashid = 0;
unsigned int part, manufacturer;
unsigned int density, size;
unsigned int density, size = 0;

/**
* Issue a Read ID Command to the Flash part. We decode supported

@@ -4268,6 +4267,9 @@ int t4_get_flash_params(struct adapter *adapter)
if (ret < 0)
return ret;

/**
* Check to see if it's one of our non-standard supported Flash parts.
*/
for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =

@@ -4278,6 +4280,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
}

/**
* Decode Flash part size. The code below looks repetitive with
* common encodings, but that's not guaranteed in the JEDEC
* specification for the Read JEDEC ID command. The only thing that
* we're guaranteed by the JEDEC specification is where the
* Manufacturer ID is in the returned result. After that each
* Manufacturer ~could~ encode things completely differently.
* Note, all Flash parts must have 64KB sectors.
*/
manufacturer = flashid & 0xff;
switch (manufacturer) {
case 0x20: { /* Micron/Numonix */

@@ -4314,21 +4325,81 @@ int t4_get_flash_params(struct adapter *adapter)
case 0x22:
size = 1 << 28; /* 256MB */
break;
default:
dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
flashid, density);
return -EINVAL;
}

adapter->params.sf_size = size;
adapter->params.sf_nsec = size / SF_SEC_SIZE;
break;
}
default:
dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
return -EINVAL;

case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
/**
* This Density -> Size decoding table is taken from ISSI
* Data Sheets.
*/
density = (flashid >> 16) & 0xff;
switch (density) {
case 0x16:
size = 1 << 25; /* 32MB */
break;
case 0x17:
size = 1 << 26; /* 64MB */
break;
}
break;
}

case 0xc2: { /* Macronix */
/**
* This Density -> Size decoding table is taken from Macronix
* Data Sheets.
*/
density = (flashid >> 16) & 0xff;
switch (density) {
case 0x17:
size = 1 << 23; /* 8MB */
break;
case 0x18:
size = 1 << 24; /* 16MB */
break;
}
break;
}

case 0xef: { /* Winbond */
/**
* This Density -> Size decoding table is taken from Winbond
* Data Sheets.
*/
density = (flashid >> 16) & 0xff;
switch (density) {
case 0x17:
size = 1 << 23; /* 8MB */
break;
case 0x18:
size = 1 << 24; /* 16MB */
break;
}
break;
}
}

/* If we didn't recognize the FLASH part, that's no real issue: the
* Hardware/Software contract says that Hardware will _*ALWAYS*_
* use a FLASH part which is at least 4MB in size and has 64KB
* sectors. The unrecognized FLASH part is likely to be much larger
* than 4MB, but that's all we really need.
*/
if (size == 0) {
dev_warn(adapter,
"Unknown Flash Part, ID = %#x, assuming 4MB\n",
flashid);
size = 1 << 22;
}

/**
* Store decoded Flash size and fall through into vetting code.
*/
adapter->params.sf_size = size;
adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
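The decode logic above follows the JEDEC Read ID layout: only the manufacturer byte has a guaranteed position, and the density byte is interpreted per vendor data sheet. One illustrative decode (the ID value below is made up for the example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t flashid = 0x001840ef;	/* example only: Winbond, density 0x18 */
	unsigned int manufacturer = flashid & 0xff;	/* guaranteed position */
	unsigned int density = (flashid >> 16) & 0xff;	/* vendor-specific */

	assert(manufacturer == 0xef);	/* Winbond */
	assert(density == 0x18);	/* decodes to 1 << 24, i.e. 16MB */
	return 0;
}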
@@ -473,6 +473,11 @@ enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
};

enum fw_iq_iqtype {
FW_IQ_IQTYPE_NIC = 1,
FW_IQ_IQTYPE_OFLD,
};

struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;

@@ -606,6 +611,9 @@ struct fw_iq_cmd {
(((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)

#define S_FW_IQ_CMD_IQTYPE 24
#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)

#define S_FW_IQ_CMD_FL0CNGCHMAP 20
#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)

@@ -226,15 +226,6 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}

/**
* cxgbe_ffs - find first bit set
* @x: the word to search
*/
static inline int cxgbe_ffs(int x)
{
return x ? __builtin_ffs(x) : 0;
}

static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
@@ -1689,6 +1689,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
u8 pciechan;

/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);

@@ -1708,6 +1709,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC |
V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));

pciechan = pi->tx_chan;

c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =

@@ -1719,16 +1723,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
pi->tx_chan) |
htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
F_FW_IQ_CMD_IQRO);
c.iqns_to_fl0congen =
htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
V_FW_IQ_CMD_IQTYPE(cong ?
FW_IQ_IQTYPE_NIC :
FW_IQ_IQTYPE_OFLD) |
F_FW_IQ_CMD_IQRO);

if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
@@ -324,10 +324,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

if (limit < stat_cnt)
return stat_cnt;

if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,

@@ -355,7 +358,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
return 0;

fman_if_stats_get_all(dpaa_intf->fif, values_copy,
sizeof(struct dpaa_if_stats));
sizeof(struct dpaa_if_stats) / 8);

for (i = 0; i < stat_cnt; i++)
values[i] =
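The dpaa hunk above starts honoring the ethdev xstats contract: when the caller's limit is smaller than the number of stats, return the required count instead of writing past the array. A sketch of that contract with generic types (not the dpaa code):

#include <stdio.h>

struct name_entry { char name[64]; };

static int get_names(struct name_entry *names, unsigned int limit,
		     unsigned int stat_cnt)
{
	unsigned int i;

	if (limit < stat_cnt)
		return stat_cnt;	/* report how many entries are needed */
	if (names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(names[i].name, sizeof(names[i].name),
				 "stat_%u", i);
	return stat_cnt;
}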
@@ -1144,12 +1144,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
union dpni_statistics value[3] = {};
unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

if (xstats == NULL)
return 0;

if (n < num)
return num;

if (xstats == NULL)
return 0;

/* Get Counters from page_0*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
0, 0, &value[0]);

@@ -1182,10 +1182,13 @@ err:
static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

if (limit < stat_cnt)
return stat_cnt;

if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,

@@ -231,7 +231,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
for (i = 0; i < cmd_params->num_dpbp; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
@@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))

#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
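The GENMASK_ULL rewrite above avoids a shift by the full word width: with the old form, GENMASK_ULL(63, 0) evaluates 1ULL << 64, which is undefined behavior; the new form never shifts by more than 63. Verifying the replacement definition standalone:

#include <assert.h>
#include <stdint.h>

#define BITS_PER_LONG_LONG 64
#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
			   (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	assert(GENMASK_ULL(63, 0) == ~0ULL);	/* full 64-bit mask, no UB */
	assert(GENMASK_ULL(15, 8) == 0xff00ULL);	/* bits 15..8 */
	return 0;
}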
@ -189,10 +191,15 @@ typedef uint64_t dma_addr_t;
|
|||
snprintf(z_name, sizeof(z_name), \
|
||||
"ena_alloc_%d", ena_alloc_cnt++); \
|
||||
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
|
||||
memset(mz->addr, 0, size); \
|
||||
virt = mz->addr; \
|
||||
phys = mz->iova; \
|
||||
handle = mz; \
|
||||
if (mz == NULL) { \
|
||||
virt = NULL; \
|
||||
phys = 0; \
|
||||
} else { \
|
||||
memset(mz->addr, 0, size); \
|
||||
virt = mz->addr; \
|
||||
phys = mz->iova; \
|
||||
} \
|
||||
} while (0)
|
||||
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
|
||||
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
|
||||
|
@ -207,21 +214,20 @@ typedef uint64_t dma_addr_t;
|
|||
snprintf(z_name, sizeof(z_name), \
|
||||
"ena_alloc_%d", ena_alloc_cnt++); \
|
||||
mz = rte_memzone_reserve(z_name, size, node, 0); \
|
||||
memset(mz->addr, 0, size); \
|
||||
virt = mz->addr; \
|
||||
phys = mz->iova; \
|
||||
if (mz == NULL) { \
|
||||
virt = NULL; \
|
||||
phys = 0; \
|
||||
} else { \
|
||||
memset(mz->addr, 0, size); \
|
||||
virt = mz->addr; \
|
||||
phys = mz->iova; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
|
||||
do { \
|
||||
const struct rte_memzone *mz; \
|
||||
char z_name[RTE_MEMZONE_NAMESIZE]; \
|
||||
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
|
||||
snprintf(z_name, sizeof(z_name), \
|
||||
"ena_alloc_%d", ena_alloc_cnt++); \
|
||||
mz = rte_memzone_reserve(z_name, size, node, 0); \
|
||||
memset(mz->addr, 0, size); \
|
||||
virt = mz->addr; \
|
||||
virt = rte_zmalloc_socket(NULL, size, 0, node); \
|
||||
} while (0)
|
||||
|
||||
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
|
||||
|
|
|
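Editor's note (illustrative sketch, not part of the commit): the old GENMASK_ULL computed (1ULL << ((h) - (l) + 1)) - 1, which for a full-width mask such as GENMASK_ULL(63, 0) shifts a 64-bit value by 64 -- undefined behaviour in C. The replacement never shifts by the full width. A small self-contained demonstration of the new form:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define DEMO_GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
				(~0ULL >> (DEMO_BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* Full-width mask now comes out as all ones, with no UB shift. */
	printf("%#llx\n", DEMO_GENMASK_ULL(63, 0)); /* 0xffffffffffffffff */
	printf("%#llx\n", DEMO_GENMASK_ULL(15, 8)); /* 0xff00 */
	return 0;
}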
@ -709,7 +709,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
	struct rte_eth_link *link = &dev->data->dev_link;

	link->link_status = 1;
	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;

@ -907,7 +907,7 @@ static int ena_start(struct rte_eth_dev *dev)
		return rc;

	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS_FLAG) {
	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
		rc = ena_rss_init_default(adapter);
		if (rc)
			return rc;

@ -1278,8 +1278,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)

	static int adapters_found;

	// Temporary modification for multi process
	//memset(adapter, 0, sizeof(struct ena_adapter));
	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	eth_dev->dev_ops = &ena_dev_ops;

@ -627,17 +627,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	static u32 instance;
	char name[NAME_MAX];

	if (!vdev->stats) {
		snprintf((char *)name, sizeof(name),
			"vnic_stats-%u", instance++);
		vdev->stats = vdev->alloc_consistent(vdev->priv,
			sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
		if (!vdev->stats)
			return -ENOMEM;
	}
	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_pa;

@ -962,6 +954,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
	return vdev->intr_coal_timer_info.max_usec;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	vdev->stats = vdev->alloc_consistent(vdev->priv,
					     sizeof(struct vnic_stats),
					     &vdev->stats_pa, (u8 *)name);
	return vdev->stats == NULL ? -ENOMEM : 0;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {

@ -196,6 +196,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars);
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);

@ -139,6 +139,7 @@ struct enic {
	u8 adv_filters;
	u32 flow_filter_mode;
	u8 filter_tags;
	uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */

	unsigned int flags;
	unsigned int priv_flags;

@ -40,6 +40,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"

@ -66,6 +67,8 @@ static const struct rte_pci_id pci_id_enic_map[] = {
	{.vendor_id = 0, /* sentinel */},
};

#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)

@ -644,6 +647,64 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.filter_ctrl = enicpmd_dev_filter_ctrl,
};

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.

@ -653,6 +714,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

@ -670,6 +732,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}

@ -695,3 +760,5 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");

@ -782,25 +782,23 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc) {
		if (nb_desc > enic->config.wq_desc_count) {
			dev_warning(enic,
				"WQ %d - number of tx desc in cmd line (%d)"\
				"is greater than that in the UCSM/CIMC adapter"\
				"policy. Applying the value in the adapter "\
				"policy (%d)\n",
				queue_idx, nb_desc, enic->config.wq_desc_count);
		} else if (nb_desc != enic->config.wq_desc_count) {
			enic->config.wq_desc_count = nb_desc;
			dev_info(enic,
				"TX Queues - effective number of descs:%d\n",
				nb_desc);
		}
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			"WQ %d - number of tx desc in cmd line (%d) "
			"is greater than that in the UCSM/CIMC adapter "
			"policy. Applying the value in the adapter "
			"policy (%d)\n",
			queue_idx, nb_desc, enic->config.wq_desc_count);
		nb_desc = enic->config.wq_desc_count;
	} else if (nb_desc != enic->config.wq_desc_count) {
		dev_info(enic,
			"TX Queues - effective number of descs:%d\n",
			nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		enic->config.wq_desc_count,
		nb_desc,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");

@ -808,7 +806,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, enic->config.wq_desc_count,
		socket_id, nb_desc,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);

@ -1252,6 +1250,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (!rq->in_use)
			continue;

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,

@ -1383,6 +1383,15 @@ int enic_probe(struct enic *enic)
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {

@ -1391,8 +1400,10 @@ int enic_probe(struct enic *enic)
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
		  enic->ig_vlan_rewrite_mode);
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PASS_THRU);
		enic->ig_vlan_rewrite_mode);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");

@ -210,7 +210,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
						       mac);
			if (ret) {
				ERROR("Failed to set default MAC address");
				goto free_args;
				goto cancel_alarm;
			}
		}
	} else {

@ -240,6 +240,8 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
	      mac->addr_bytes[4], mac->addr_bytes[5]);
	dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	return 0;
cancel_alarm:
	failsafe_hotplug_alarm_cancel(dev);
free_args:
	failsafe_args_free(dev);
free_subs:

@ -287,6 +287,7 @@ fs_dev_remove(struct sub_device *sdev)
		sdev->state = DEV_ACTIVE;
		/* fallthrough */
	case DEV_ACTIVE:
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_PROBED;
		/* fallthrough */

@ -346,6 +347,35 @@ fs_rxtx_clean(struct sub_device *sdev)
	return 1;
}

void
failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
{
	int ret;

	if (sdev == NULL)
		return;
	if (sdev->rmv_callback) {
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_RMV,
						failsafe_eth_rmv_event_callback,
						sdev);
		if (ret)
			WARN("Failed to unregister RMV callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->rmv_callback = 0;
	}
	if (sdev->lsc_callback) {
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						sdev);
		if (ret)
			WARN("Failed to unregister LSC callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->lsc_callback = 0;
	}
}

void
failsafe_dev_remove(struct rte_eth_dev *dev)
{

@ -124,7 +124,7 @@ fs_dev_configure(struct rte_eth_dev *dev)
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,

@ -132,9 +132,11 @@ fs_dev_configure(struct rte_eth_dev *dev)
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,

@ -142,6 +144,8 @@ fs_dev_configure(struct rte_eth_dev *dev)
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;

@ -237,6 +241,7 @@ fs_dev_close(struct rte_eth_dev *dev)
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}

@ -117,6 +117,10 @@ struct sub_device {
	volatile unsigned int remove:1;
	/* flow isolation state */
	int flow_isolated:1;
	/* RMV callback registration state */
	unsigned int rmv_callback:1;
	/* LSC callback registration state */
	unsigned int lsc_callback:1;
};

struct fs_priv {

@ -187,6 +191,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev);
/* ETH_DEV */

int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
			      struct rte_eth_stats *from);

@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)

@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)

@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0

@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0

@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)

@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0

@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0

@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)

@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0

@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)

@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)

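Editor's note (illustrative sketch, not part of the commit): these register-header changes swap the literal 0x1 for 0x1u in every bit-31 mask. Assuming I40E_MASK expands to a plain left shift of its first argument, 0x1 << 31 overflows signed int, which is undefined behaviour in C; the unsigned literal keeps the shift well defined. A minimal stand-in macro showing the difference:

#include <stdint.h>

/* DEMO_MASK is a hypothetical stand-in for I40E_MASK, not a DPDK symbol. */
#define DEMO_MASK(mask, shift) ((mask) << (shift))

static const uint32_t ok = DEMO_MASK(0x1u, 31); /* well-defined 0x80000000u */
/* static const uint32_t bad = DEMO_MASK(0x1, 31);  signed overflow: UB */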
@ -1211,6 +1211,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/*
	 * Switch Tag value should not be identical to either the First Tag
	 * or Second Tag values. So set something other than common Ethertype
	 * for internal switching.
	 */
	hw->switch_tag = 0xffff;

	/* Check if need to support multi-driver */
	i40e_support_multi_driver(dev);

@ -1554,6 +1561,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
	struct rte_flow *p_flow;
	int ret;
	uint8_t aq_fail = 0;
	int retries = 0;

	PMD_INIT_FUNC_TRACE();

@ -1595,9 +1603,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* register callback func to eal lib */
	rte_intr_callback_unregister(intr_handle,
				     i40e_dev_interrupt_handler, dev);
	/* unregister callback func to eal lib */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				i40e_dev_interrupt_handler, dev);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				     "intr callback unregister failed: %d",
				     ret);
			return ret;
		}
		i40e_msec_delay(500);
	} while (retries++ < 5);

	i40e_rm_ethtype_filter_list(pf);
	i40e_rm_tunnel_filter_list(pf);

@ -1964,27 +1983,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	enum i40e_aq_phy_type cnt;
	uint8_t avail_speed;
	uint32_t phy_type_mask = 0;

	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_25GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;
	int ret = -ENOTSUP;

	/* To get phy capabilities of available speeds. */
	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
					      NULL);
	if (status) {
		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
			    status);
		return ret;
	}
	avail_speed = phy_ab.link_speed;

	/* To get the current phy config. */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
	if (status) {
		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
			    status);
		return ret;
	}

	/* If link already up, no need to set up again */
	if (is_up && phy_ab.phy_type != 0)
	/* If link needs to go up and it is in autoneg mode the speed is OK,
	 * no need to set up again.
	 */
	if (is_up && phy_ab.phy_type != 0 &&
	    abilities & I40E_AQ_PHY_AN_ENABLED &&
	    phy_ab.link_speed != 0)
		return I40E_SUCCESS;

	memset(&phy_conf, 0, sizeof(phy_conf));

@ -1993,18 +2025,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update ablities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;

	phy_conf.abilities = abilities;

	/* If link needs to go up, but the force speed is not supported,
	 * Warn users and config the default available speeds.
	 */
	if (is_up && !(force_speed & avail_speed)) {
		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
		phy_conf.link_speed = avail_speed;
	} else {
		phy_conf.link_speed = is_up ? force_speed : avail_speed;
	}


	/* To enable link, phy_type mask needs to include each type */
	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
	/* PHY type mask needs to include each type except PHY type extension */
	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
		phy_type_mask |= 1 << cnt;

	/* use get_phy_abilities_resp value for the rest */

@ -2037,11 +2071,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
		conf->link_speeds = ETH_LINK_SPEED_40G |
				    ETH_LINK_SPEED_25G |
				    ETH_LINK_SPEED_20G |
				    ETH_LINK_SPEED_10G |
				    ETH_LINK_SPEED_1G |
				    ETH_LINK_SPEED_100M;
	}
	speed = i40e_parse_link_speeds(conf->link_speeds);
	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
		abilities |= I40E_AQ_PHY_AN_ENABLED;
	abilities |= I40E_AQ_PHY_LINK_ENABLED;
	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
		     I40E_AQ_PHY_AN_ENABLED |
		     I40E_AQ_PHY_LINK_ENABLED;

	return i40e_phy_conf_link(hw, abilities, speed, true);
}

@ -2148,13 +2189,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
	}

	/* Apply link configure */
	if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
				ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G)) {
		PMD_DRV_LOG(ERR, "Invalid link setting");
		goto err_up;
	}
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");

@ -2297,6 +2331,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(intr_handle);

	i40e_fdir_teardown(pf);

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

@ -2308,7 +2344,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
	pf->vmdq = NULL;

	/* release all the existing VSIs and VEBs */
	i40e_fdir_teardown(pf);
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */

@ -2444,77 +2479,139 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
	return i40e_phy_conf_link(hw, abilities, speed, false);
}

int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
static __rte_always_inline void
update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
/* Link status registers and values*/
#define I40E_PRTMAC_LINKSTA		0x001E2420
#define I40E_REG_LINK_UP		0x40000080
#define I40E_PRTMAC_MACC		0x001E24E0
#define I40E_REG_MACC_25GB		0x00020000
#define I40E_REG_SPEED_MASK		0x38000000
#define I40E_REG_SPEED_100MB		0x00000000
#define I40E_REG_SPEED_1GB		0x08000000
#define I40E_REG_SPEED_10GB		0x10000000
#define I40E_REG_SPEED_20GB		0x20000000
#define I40E_REG_SPEED_25_40GB		0x18000000
	uint32_t link_speed;
	uint32_t reg_val;

	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
	link_speed = reg_val & I40E_REG_SPEED_MASK;
	reg_val &= I40E_REG_LINK_UP;
	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;

	if (unlikely(link->link_status == 0))
		return;

	/* Parse the link status */
	switch (link_speed) {
	case I40E_REG_SPEED_100MB:
		link->link_speed = ETH_SPEED_NUM_100M;
		break;
	case I40E_REG_SPEED_1GB:
		link->link_speed = ETH_SPEED_NUM_1G;
		break;
	case I40E_REG_SPEED_10GB:
		link->link_speed = ETH_SPEED_NUM_10G;
		break;
	case I40E_REG_SPEED_20GB:
		link->link_speed = ETH_SPEED_NUM_20G;
		break;
	case I40E_REG_SPEED_25_40GB:
		reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);

		if (reg_val & I40E_REG_MACC_25GB)
			link->link_speed = ETH_SPEED_NUM_25G;
		else
			link->link_speed = ETH_SPEED_NUM_40G;

		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
		break;
	}
}

static __rte_always_inline void
update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
		 bool enable_lse)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	uint32_t rep_cnt = MAX_REPEAT_TIME;
	struct i40e_link_status link_status;
	int status;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = i40e_aq_get_link_info(hw, enable_lse,
					       &link_status, NULL);
		if (status != I40E_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
		if (unlikely(status != I40E_SUCCESS)) {
			link->link_speed = ETH_SPEED_NUM_100M;
			link->link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
			return;
		}

		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (unlikely(link->link_status != 0))
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		link->link_speed = ETH_SPEED_NUM_100M;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_SPEED_NUM_1G;
		link->link_speed = ETH_SPEED_NUM_1G;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		link->link_speed = ETH_SPEED_NUM_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		link->link_speed = ETH_SPEED_NUM_20G;
		break;
	case I40E_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		link->link_speed = ETH_SPEED_NUM_25G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		link->link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_100M;
		link->link_speed = ETH_SPEED_NUM_100M;
		break;
	}
}

int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));

	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	if (!wait_to_complete)
		update_link_no_wait(hw, &link);
	else
		update_link_wait(hw, &link, enable_lse);

	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

@ -9693,6 +9790,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
#define I40E_GL_SWR_PM_UP_THR            0x269FBC

/*
 * GL_SWR_PM_UP_THR:
 * The value is not impacted from the link speed, its value is set according
 * to the total number of ports for a better pipe-monitor configuration.
 */
static bool
i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
{
#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
	.device_id = (dev),   \
	.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE

#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
	.device_id = (dev),   \
	.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE

	static const struct {
		uint16_t device_id;
		uint32_t val;
	} swr_pm_table[] = {
		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },

		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
	};
	uint32_t i;

	if (value == NULL) {
		PMD_DRV_LOG(ERR, "value is NULL");
		return false;
	}

	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
		if (hw->device_id == swr_pm_table[i].device_id) {
			*value = swr_pm_table[i].val;

			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
				    "value - 0x%08x",
				    hw->device_id, *value);
			return true;
		}
	}

	return false;
}

static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{

@ -9757,13 +9908,16 @@ i40e_configure_registers(struct i40e_hw *hw)
		}

		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
			if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
			    I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_SF_VALUE;
			else /* For X710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_EF_VALUE;
			uint32_t cfg_val;

			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
					    "GL_SWR_PM_UP_THR value fixup",
					    hw->device_id);
				continue;
			}

			reg_table[i].val = cfg_val;
		}

		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,

@ -11329,7 +11483,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
			      uint32_t pkg_size, uint32_t proto_num,
			      struct rte_pmd_i40e_proto_info *proto)
			      struct rte_pmd_i40e_proto_info *proto,
			      enum rte_pmd_i40e_package_op op)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t pctype_num;

@ -11342,6 +11497,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
	uint32_t i, j, n;
	int ret;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Unsupported operation.");
		return -1;
	}

	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				(uint8_t *)&pctype_num, sizeof(pctype_num),
				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);

@ -11404,8 +11565,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
				i40e_find_customized_pctype(pf,
						      I40E_CUSTOMIZED_GTPU);
			if (new_pctype) {
				new_pctype->pctype = pctype_value;
				new_pctype->valid = true;
				if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
					new_pctype->pctype = pctype_value;
					new_pctype->valid = true;
				} else {
					new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
					new_pctype->valid = false;
				}
			}
		}

@ -11415,8 +11581,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,

static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
			     uint32_t pkg_size, uint32_t proto_num,
			     struct rte_pmd_i40e_proto_info *proto)
			     uint32_t pkg_size, uint32_t proto_num,
			     struct rte_pmd_i40e_proto_info *proto,
			     enum rte_pmd_i40e_package_op op)
{
	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
	uint16_t port_id = dev->data->port_id;

@ -11429,6 +11596,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
	bool inner_ip;
	int ret;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Unsupported operation.");
		return -1;
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
		rte_pmd_i40e_ptype_mapping_reset(port_id);
		return 0;
	}

	/* get information about new ptype num */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				(uint8_t *)&ptype_num, sizeof(ptype_num),

@ -11547,7 +11725,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,

void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
			    uint32_t pkg_size)
			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint32_t proto_num;

@ -11556,6 +11734,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
	uint32_t i;
	int ret;

	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
		PMD_DRV_LOG(ERR, "Unsupported operation.");
		return;
	}

	/* get information about protocol number */
	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				       (uint8_t *)&proto_num, sizeof(proto_num),

@ -11589,20 +11773,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
	/* Check if GTP is supported. */
	for (i = 0; i < proto_num; i++) {
		if (!strncmp(proto[i].name, "GTP", 3)) {
			pf->gtp_support = true;
			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
				pf->gtp_support = true;
			else
				pf->gtp_support = false;
			break;
		}
	}

	/* Update customized pctype info */
	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
					    proto_num, proto);
					    proto_num, proto, op);
	if (ret)
		PMD_DRV_LOG(INFO, "No pctype is updated.");

	/* Update customized ptype info */
	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
					   proto_num, proto);
					   proto_num, proto, op);
	if (ret)
		PMD_DRV_LOG(INFO, "No ptype is updated.");

@ -40,6 +40,7 @@
#include <rte_hash.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
#include "rte_pmd_i40e.h"

#define I40E_VLAN_TAG_SIZE        4

@ -1221,7 +1222,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
				 uint32_t pkg_size);
				 uint32_t pkg_size,
				 enum rte_pmd_i40e_package_op op);
int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
		struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);

@ -2418,7 +2418,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
		break;
	}

	if (cus_pctype)
	if (cus_pctype && cus_pctype->valid)
		return cus_pctype->pctype;

	return I40E_FILTER_PCTYPE_INVALID;

@ -1632,8 +1632,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
		return -EINVAL;
	}

	i40e_update_customized_info(dev, buff, size);

	/* Find metadata segment */
	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
							pkg_hdr);

@ -1690,6 +1688,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
			PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
		else if (is_exist == 3)
			PMD_DRV_LOG(ERR, "Profile of different group already exists");
		i40e_update_customized_info(dev, buff, size, op);
		rte_free(profile_info_sec);
		return -EEXIST;
	}

@ -1737,6 +1736,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
		}
	}

	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
	    op == RTE_PMD_I40E_PKG_OP_WR_DEL)
		i40e_update_customized_info(dev, buff, size, op);

	rte_free(profile_info_sec);
	return status;
}

@ -1366,6 +1366,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

@ -1386,8 +1388,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ixgbe_dev_interrupt_handler, eth_dev);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ixgbe_dev_interrupt_handler, eth_dev);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				     "intr callback unregister failed: %d",
				     ret);
			return ret;
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);

@ -2316,11 +2330,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
						 IXGBE_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {

@ -2334,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
						 IXGBE_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {

@ -3886,7 +3890,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
		int i;

		for (i = 0; i < 5; i++) {

@ -5822,8 +5826,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if misx has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
		return;

	if (rte_intr_allow_others(intr_handle))

@ -5847,27 +5855,30 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
	     queue_id++) {
		/* by default, 1:1 mapping */
		ixgbe_set_ivar_map(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
				   IXGBE_MISC_VEC_ID);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
		break;
	default:
		break;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe_set_ivar_map(hw, -1,
					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
					   IXGBE_MISC_VEC_ID);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
			break;
		default:
			break;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
			IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);

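Editor's note (illustrative sketch, not part of the commit): both the i40e and ixgbe uninit paths above replace a one-shot rte_intr_callback_unregister() with a retry loop, since the call can return -EAGAIN while the callback is still executing in the interrupt thread. The common shape of that pattern, extracted as a standalone helper under assumed names:

#include <rte_interrupts.h>
#include <rte_cycles.h>

/* example_unregister_intr_cb is hypothetical; retry count and delay vary. */
static int
example_unregister_intr_cb(struct rte_intr_handle *handle,
			   rte_intr_callback_fn cb, void *cb_arg)
{
	int retries = 0;
	int ret;

	do {
		ret = rte_intr_callback_unregister(handle, cb, cb_arg);
		if (ret >= 0)
			return 0;
		if (ret != -EAGAIN)
			return ret;	/* real failure, give up */
		rte_delay_ms(100);	/* callback still running, retry */
	} while (retries++ < 10);

	return ret;
}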
@ -123,6 +123,11 @@
#define IXGBE_5TUPLE_MAX_PRI            7
#define IXGBE_5TUPLE_MIN_PRI            1

/* bit of VXLAN tunnel type | 7 bits of zeros  | 8 bits of zeros*/
#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE    0x8000
/* bit of NVGRE tunnel type | 7 bits of zeros  | 8 bits of zeros*/
#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE    0x0

#define IXGBE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \

@ -423,9 +423,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
			IXGBE_FDIRIP6M_TNI_VNI;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		mac_mask = info->mask.mac_addr_byte_mask;
		fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
				& IXGBE_FDIRIP6M_INNER_MAC;
		fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
		mac_mask = info->mask.mac_addr_byte_mask &
			(IXGBE_FDIRIP6M_INNER_MAC >>
			IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
		fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
				IXGBE_FDIRIP6M_INNER_MAC);

		switch (info->mask.tunnel_type_mask) {
		case 0:

@ -800,10 +803,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
			input->formatted.inner_mac,
			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
		input->formatted.tunnel_type =
			fdir_filter->input.flow.tunnel_flow.tunnel_type;
		if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
				RTE_FDIR_TUNNEL_TYPE_VXLAN)
			input->formatted.tunnel_type =
					IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
		else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
				RTE_FDIR_TUNNEL_TYPE_NVGRE)
			input->formatted.tunnel_type =
					IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
		else
			PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");

		input->formatted.tni_vni =
			fdir_filter->input.flow.tunnel_flow.tunnel_id;
			fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
	}

	return 0;

@ -1030,8 +1042,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode */
			if (input->formatted.tunnel_type !=
				RTE_FDIR_TUNNEL_TYPE_NVGRE)
			if (input->formatted.tunnel_type)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);

@ -1039,6 +1050,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */

@ -1665,7 +1665,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
					item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,

@ -2370,7 +2371,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;
				IXGBE_FDIR_VXLAN_TUNNEL_TYPE;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {

@ -2422,17 +2423,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				&rule->ixgbe_fdir.formatted.tni_vni),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;
				IXGBE_FDIR_NVGRE_TUNNEL_TYPE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,

@ -2524,7 +2523,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			/* tni is a 24-bits bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
			nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}

@ -90,7 +90,7 @@ static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG,
	.link_autoneg = ETH_LINK_FIXED,
};
static int is_kni_initialized;

@ -1479,6 +1479,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* Before update the link info,
	 * must set linfo.link.link_status64 to 0.
	 */
	lio_dev->linfo.link.link_status64 = 0;

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,

@ -85,6 +85,8 @@ const char *pmd_mlx4_init_params[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
static void mlx4_dev_stop(struct rte_eth_dev *dev);
|
||||
|
||||
/**
|
||||
* DPDK callback for Ethernet device configuration.
|
||||
*
|
||||
|
@ -108,7 +110,13 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
|
|||
" flow error type %d, cause %p, message: %s",
|
||||
-ret, strerror(-ret), error.type, error.cause,
|
||||
error.message ? error.message : "(unspecified)");
|
||||
goto exit;
|
||||
}
|
||||
ret = mlx4_intr_install(priv);
|
||||
if (ret)
|
||||
ERROR("%p: interrupt handler installation failed",
|
||||
(void *)dev);
|
||||
exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -141,7 +149,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
|
|||
(void *)dev, strerror(-ret));
|
||||
goto err;
|
||||
}
|
||||
ret = mlx4_intr_install(priv);
|
||||
ret = mlx4_rxq_intr_enable(priv);
|
||||
if (ret) {
|
||||
ERROR("%p: interrupt handler installation failed",
|
||||
(void *)dev);
|
||||
|
@ -161,8 +169,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
|
|||
dev->rx_pkt_burst = mlx4_rx_burst;
|
||||
return 0;
|
||||
err:
|
||||
/* Rollback. */
|
||||
priv->started = 0;
|
||||
mlx4_dev_stop(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -187,7 +194,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
|
|||
dev->rx_pkt_burst = mlx4_rx_burst_removed;
|
||||
rte_wmb();
|
||||
mlx4_flow_sync(priv, NULL);
|
||||
mlx4_intr_uninstall(priv);
|
||||
mlx4_rxq_intr_disable(priv);
|
||||
mlx4_rss_deinit(priv);
|
||||
}
|
||||
|
||||
|
@ -212,6 +219,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
|
|||
dev->tx_pkt_burst = mlx4_tx_burst_removed;
|
||||
rte_wmb();
|
||||
mlx4_flow_clean(priv);
|
||||
mlx4_rss_deinit(priv);
|
||||
for (i = 0; i != dev->data->nb_rx_queues; ++i)
|
||||
mlx4_rx_queue_release(dev->data->rx_queues[i]);
|
||||
for (i = 0; i != dev->data->nb_tx_queues; ++i)
|
||||
|
@ -336,7 +344,7 @@ mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
|
|||
return -rte_errno;
|
||||
}
|
||||
if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
|
||||
uint32_t ports = rte_log2_u32(conf->ports.present);
|
||||
uint32_t ports = rte_log2_u32(conf->ports.present + 1);
|
||||
|
||||
if (tmp >= ports) {
|
||||
ERROR("port index %lu outside range [0,%" PRIu32 ")",
|
||||
|
@@ -426,6 +434,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct ibv_device_attr_ex device_attr_ex;
	struct mlx4_conf conf = {
		.ports.present = 0,
	};

@@ -486,19 +495,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
	ibv_dev = list[i];
	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr)) {
		rte_errno = ENODEV;
		err = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		rte_errno = EINVAL;
		err = EINVAL;
		goto error;
	}
	/* Use all ports when none are defined */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
	/* Retrieve extended device attributes. */
	if (ibv_query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
		err = ENODEV;
		goto error;
	}
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;

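The `present` mask above sets one bit per physical port: `(UINT64_C(1) << n) - 1` is the n low bits. A worked check (illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Worked check of the port-presence mask: n low bits set, one per port. */
static void
check_present_mask(void)
{
	assert(((UINT64_C(1) << 1) - 1) == 0x1); /* 1 port */
	assert(((UINT64_C(1) << 2) - 1) == 0x3); /* 2 ports */
	assert(((UINT64_C(1) << 4) - 1) == 0xf); /* 4 ports */
}
```
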
@@ -514,18 +528,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
		DEBUG("using port %u", port);
		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL) {
			rte_errno = ENODEV;
			err = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			rte_errno = err;
			ERROR("port query failed: %s", strerror(rte_errno));
			err = ENODEV;
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			rte_errno = ENOTSUP;
			err = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;

@@ -535,15 +549,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
		      port, ibv_port_state_str(port_attr.state),
		      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
		err = mlx4_fd_set_non_blocking(ctx->async_fd);
		if (err) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(rte_errno));
			      strerror(err));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			rte_errno = ENOMEM;
			err = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}

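The change above only alters how the failure is reported; for reference, a plain-libc sketch of what a helper like `mlx4_fd_set_non_blocking()` presumably does (an assumption about its internals, not the driver's implementation):

```c
#include <errno.h>
#include <fcntl.h>

/* Sketch: set O_NONBLOCK on a descriptor, returning 0 or a positive errno. */
static int
fd_set_non_blocking_sketch(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return errno;
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
		return errno;
	return 0;
}
```
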
@@ -552,7 +567,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			rte_errno = ENOMEM;
			err = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}

@@ -573,10 +588,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      (priv->hw_csum_l2tun ? "" : "not "));
		priv->hw_rss_max_qps =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
		/* Configure the first MAC address by default. */
		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
		err = mlx4_get_mac(priv, &mac.addr_bytes);
		if (err) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (rte_errno: %s)", strerror(rte_errno));
			      " (error: %s)", strerror(err));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",

@@ -609,8 +628,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			err = ENOMEM;
			ERROR("can not allocate rte ethdev");
			rte_errno = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;

@@ -655,8 +674,6 @@ port_error:
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	if (i == device_attr.phys_port_cnt)
		return 0;
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as

@@ -668,8 +685,9 @@ error:
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(rte_errno >= 0);
	return -rte_errno;
	if (err)
		rte_errno = err;
	return -err;
}

static const struct rte_pci_id mlx4_pci_id_map[] = {

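The error path now carries a positive errno in a local `err`, publishes it to `rte_errno` once on exit, and returns its negative. A minimal sketch of that convention; `probe_step()` is a hypothetical stand-in for the real work:

```c
#include <errno.h>
#include <rte_errno.h>

static int probe_step(void) { return 0; } /* hypothetical stand-in */

/* Sketch of the convention the hunk converges on. */
static int
probe_sketch(void)
{
	int err = 0;

	if (probe_step() != 0)
		err = ENODEV; /* record and keep unwinding */
	if (err)
		rte_errno = err;
	return -err; /* 0 on success, negative errno on failure */
}
```
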
@@ -126,8 +126,10 @@ struct priv {
	uint32_t vf:1; /**< This is a VF device. */
	uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
	uint32_t isolated:1; /**< Toggle isolated mode. */
	uint32_t rss_init:1; /**< Common RSS context is initialized. */
	uint32_t hw_csum:1; /* Checksum offload is supported. */
	uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
	uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
	struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
	struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
	LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */

@@ -170,6 +172,8 @@ const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);

int mlx4_intr_uninstall(struct priv *priv);
int mlx4_intr_install(struct priv *priv);
int mlx4_rxq_intr_enable(struct priv *priv);
void mlx4_rxq_intr_disable(struct priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);