f-stack/dpdk/drivers/net/bnxt/bnxt_txq.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
#include <inttypes.h>
#include <rte_malloc.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
/*
 * TX Queues
 */
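/*
 * Report the Tx offload capabilities this port can advertise, gating the
 * VLAN-insert and tunnel TSO flags on what the firmware has negotiated.
 */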
uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp)
{
	uint64_t tx_offload_capa;

	tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
			  DEV_TX_OFFLOAD_UDP_CKSUM |
			  DEV_TX_OFFLOAD_TCP_CKSUM |
			  DEV_TX_OFFLOAD_TCP_TSO |
			  DEV_TX_OFFLOAD_QINQ_INSERT |
			  DEV_TX_OFFLOAD_MULTI_SEGS;

	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	if (BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp))
		tx_offload_capa |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp))
		tx_offload_capa |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp))
		tx_offload_capa |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
		tx_offload_capa |= DEV_TX_OFFLOAD_IPIP_TNL_TSO;

	return tx_offload_capa;
}
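/*
 * The HW stats block is carved out of DMA memory owned by the completion
 * ring, so detaching the pointer is all that is needed here; the backing
 * memzone is freed along with the queue.
 */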
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
	if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
		txq->cp_ring->hw_stats = NULL;
}
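
/* Return any mbufs still held in the Tx software ring to their mempool. */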
static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
{
	struct bnxt_sw_tx_bd *sw_ring;
	uint16_t i;

	if (!txq || !txq->tx_ring)
		return;

	sw_ring = txq->tx_ring->tx_buf_ring;
	if (sw_ring) {
		for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}
}
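
/* Drain queued mbufs from every Tx queue of the port. */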
void bnxt_free_tx_mbufs(struct bnxt *bp)
{
	struct bnxt_tx_queue *txq;
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		txq = bp->tx_queues[i];
		bnxt_tx_queue_release_mbufs(txq);
	}
}
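
/*
 * dev_ops tx_queue_release handler: frees the queued mbufs, both ring
 * structures, the stats mapping, and finally the queue itself.
 */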
void bnxt_tx_queue_release_op(void *tx_queue)
{
	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;

	if (txq) {
		if (is_bnxt_in_error(txq->bp))
			return;

		/* Free TX ring hardware descriptors */
		bnxt_tx_queue_release_mbufs(txq);
		if (txq->tx_ring) {
			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
			rte_free(txq->tx_ring->tx_ring_struct);
			rte_free(txq->tx_ring);
		}

		/* Free TX completion ring hardware descriptors */
		if (txq->cp_ring) {
			bnxt_free_ring(txq->cp_ring->cp_ring_struct);
			rte_free(txq->cp_ring->cp_ring_struct);
			rte_free(txq->cp_ring);
		}

		bnxt_free_txq_stats(txq);
		rte_memzone_free(txq->mz);
		txq->mz = NULL;

		rte_free(txq->free);
		rte_free(txq);
	}
}
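
/*
 * dev_ops tx_queue_setup handler: validates the ring index and descriptor
 * count, releases any queue previously configured at this index, then
 * allocates the queue, its software state, and its HW rings.
 */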
int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   const struct rte_eth_txconf *tx_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_tx_queue *txq;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			"Cannot create Tx ring %d. Only %d rings available\n",
			queue_idx, bp->max_tx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_TX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->tx_queues) {
		txq = eth_dev->data->tx_queues[queue_idx];
		if (txq) {
			bnxt_tx_queue_release_op(txq);
			txq = NULL;
		}
	}
	txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
		return -ENOMEM;
	}

	txq->free = rte_zmalloc_socket(NULL,
				       sizeof(struct rte_mbuf *) * nb_desc,
				       RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq->free) {
		PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
		rc = -ENOMEM;
		goto err;
	}
	txq->bp = bp;
	txq->nb_tx_desc = nb_desc;
	txq->tx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
	txq->offloads = eth_dev->data->dev_conf.txmode.offloads |
			tx_conf->offloads;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	rc = bnxt_init_tx_ring_struct(txq, socket_id);
	if (rc)
		goto err;

	txq->queue_id = queue_idx;
	txq->port_id = eth_dev->data->port_id;

	/* Allocate TX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
			     NULL, "txr")) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
		rc = -ENOMEM;
		goto err;
	}

	if (bnxt_init_one_tx_ring(txq)) {
		PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
		rc = -ENOMEM;
		goto err;
	}

	eth_dev->data->tx_queues[queue_idx] = txq;

	return 0;
err:
	bnxt_tx_queue_release_op(txq);
	return rc;
}
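
/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * the setup handler above through the ethdev API rather than calling it
 * directly. The descriptor count and queue index below are placeholders.
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *	txconf.offloads = port_conf.txmode.offloads;
 *	ret = rte_eth_tx_queue_setup(port_id, queue_id, 512,
 *				     rte_eth_dev_socket_id(port_id),
 *				     &txconf);
 */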