f-stack/dpdk/drivers/net/bnxt/bnxt_txr.h

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
#ifndef _BNXT_TXR_H_
#define _BNXT_TXR_H_
#include <rte_io.h>
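
/*
 * Tx limits: the maximum number of segments accepted for a TSO packet and
 * the minimum Tx packet length. How these limits are enforced (for example,
 * whether short frames are padded) is handled in the Tx path and is not
 * visible in this header.
 */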
#define BNXT_MAX_TSO_SEGS 32
#define BNXT_MIN_PKT_SIZE 52
/* Number of transmit descriptors processed per inner loop in vector mode. */
#define BNXT_TX_DESCS_PER_LOOP 4U
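
/*
 * Per-queue Tx ring state. The producer/consumer indices are kept as raw
 * (unmasked) values; callers apply tx_ring_struct->ring_mask when indexing,
 * as bnxt_tx_bds_in_hw() does below.
 */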
struct bnxt_tx_ring_info {
        uint16_t                tx_raw_prod;
        uint16_t                tx_raw_cons;
        struct bnxt_db_info     tx_db;

        struct tx_bd_long       *tx_desc_ring;
        struct rte_mbuf         **tx_buf_ring;

        rte_iova_t              tx_desc_mapping;

        struct bnxt_ring        *tx_ring_struct;
};
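
/* Number of Tx buffer descriptors currently posted to hardware and not yet completed. */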
static inline uint32_t bnxt_tx_bds_in_hw(struct bnxt_tx_queue *txq)
{
        return ((txq->tx_ring->tx_raw_prod - txq->tx_ring->tx_raw_cons) &
                txq->tx_ring->tx_ring_struct->ring_mask);
}
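
/*
 * Free Tx descriptors available to software. One slot is kept unused so a
 * full ring can be distinguished from an empty one.
 */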
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_queue *txq)
{
        /* Tell compiler to fetch tx indices from memory. */
        rte_compiler_barrier();
        return ((txq->tx_ring->tx_ring_struct->ring_size -
                 bnxt_tx_bds_in_hw(txq)) - 1);
}
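
/*
 * Usage sketch (illustrative only, not part of this header): a transmit
 * routine would typically check
 *
 *     if (bnxt_tx_avail(txq) < nr_bds_needed)
 *         return 0;
 *
 * before filling tx_desc_ring entries, advancing tx_raw_prod and ringing
 * tx_db, where nr_bds_needed stands for whatever BD count the packet
 * requires.
 */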
void bnxt_free_tx_rings(struct bnxt *bp);
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
#endif
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
uint16_t bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                 uint16_t nb_pkts);
#endif
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr);
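
/*
 * Combinations of mbuf Tx offload flags handled in the Tx path, and the
 * corresponding groups of TX_BD_LONG lflags bits programmed into the long
 * Tx BD.
 */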
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
                RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
                RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
                RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
                RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
                RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
                RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
                RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (RTE_MBUF_F_TX_IP_CKSUM | \
                RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM)
#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
                TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
                TX_BD_LONG_LFLAGS_IP_CHKSUM)
#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
                TX_BD_LONG_LFLAGS_IP_CHKSUM)
#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
                TX_BD_LONG_LFLAGS_IP_CHKSUM)
#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
                TX_BD_LONG_LFLAGS_T_IP_CHKSUM)
#endif