f-stack/dpdk/drivers/net/ena/ena_ethdev.h

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*/
#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_
#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_timer.h>
#include "ena_com.h"
#define ENA_REGS_BAR 0
#define ENA_MEM_BAR 2
#define ENA_MAX_NUM_QUEUES 128
#define ENA_MIN_FRAME_LEN 64
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
#define ENA_RX_BUF_MIN_SIZE 1400
#define ENA_DEFAULT_RING_SIZE 1024
#define ENA_MIN_MTU 128
2017-04-21 10:43:26 +00:00
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
#define ENA_WD_TIMEOUT_SEC 3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
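/*
 * Minimal sketch, assuming a hypothetical helper name: how the keep-alive
 * timeout above is meant to be compared against a timestamp expressed in
 * timer cycles (rte_get_timer_cycles() comes from rte_cycles.h, included
 * above). Not part of the driver API.
 */
static inline bool ena_keep_alive_expired_example(uint64_t last_keep_alive,
						  uint64_t keep_alive_timeout)
{
	/* Expired once more than ENA_WD_TIMEOUT_SEC worth of cycles passed. */
	return (rte_get_timer_cycles() - last_keep_alive) > keep_alive_timeout;
}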
/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desirable to:
 * - perform batch submissions while populating the submission queue,
 * - avoid blocking the transmission of other packets during the cleanup phase.
 * Hence the cleanup/refill threshold is 1/8 of the queue size, capped at
 * ENA_REFILL_THRESH_PACKET for very large rings (e.g. 8k Rx rings).
 */
#define ENA_REFILL_THRESH_DIVIDER 8
#define ENA_REFILL_THRESH_PACKET 256
#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
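/*
 * Minimal sketch, assuming hypothetical helper names: one way the refill
 * threshold and the masked-index macros above can be combined. The in-tree
 * Rx/Tx paths live in ena_ethdev.c; these helpers only illustrate the intent
 * of the macros.
 */
static inline unsigned int ena_refill_threshold_example(unsigned int ring_size)
{
	unsigned int thresh = ring_size / ENA_REFILL_THRESH_DIVIDER;

	/* 1/8 of the ring, but never more than ENA_REFILL_THRESH_PACKET. */
	return thresh > ENA_REFILL_THRESH_PACKET ?
		ENA_REFILL_THRESH_PACKET : thresh;
}

static inline uint16_t ena_idx_next_example(uint16_t idx, uint16_t size_mask)
{
	/* size_mask is ring_size - 1 for a power-of-two sized ring. */
	return ENA_IDX_NEXT_MASKED(idx, size_mask);
}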
struct ena_adapter;
enum ena_ring_type {
ENA_RING_TYPE_RX = 1,
ENA_RING_TYPE_TX = 2,
};
struct ena_tx_buffer {
struct rte_mbuf *mbuf;
unsigned int tx_descs;
unsigned int num_of_bufs;
struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
/* Rx buffer holds only a pointer to the mbuf; may be expanded in the future */
struct ena_rx_buffer {
struct rte_mbuf *mbuf;
struct ena_com_buf ena_buf;
};
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
u32 max_rx_queue_size;
u32 max_tx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
struct ena_stats_tx {
u64 cnt;
u64 bytes;
u64 prepare_ctx_err;
u64 linearize;
u64 linearize_failed;
u64 tx_poll;
u64 doorbells;
u64 bad_req_id;
u64 available_desc;
};
struct ena_stats_rx {
u64 cnt;
u64 bytes;
u64 refill_partial;
u64 bad_csum;
u64 mbuf_alloc_fail;
u64 bad_desc_num;
u64 bad_req_id;
};
struct ena_ring {
u16 next_to_use;
u16 next_to_clean;
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
/* Holds the empty requests for TX/RX OOO completions */
union {
uint16_t *empty_tx_reqs;
uint16_t *empty_rx_reqs;
};
union {
struct ena_tx_buffer *tx_buffer_info; /* context of Tx packet */
struct ena_rx_buffer *rx_buffer_info; /* context of Rx packet */
};
struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info entries */
unsigned int size_mask;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
__rte_cache_aligned;
struct rte_mempool *mb_pool;
unsigned int port_id;
unsigned int id;
/* Max length the PMD can push to the device for LLQ */
uint8_t tx_max_header_size;
int configured;
uint8_t *push_buf_intermediate_buf;
struct ena_adapter *adapter;
uint64_t offloads;
u16 sgl_size;
bool disable_meta_caching;
union {
struct ena_stats_rx rx_stats;
struct ena_stats_tx tx_stats;
};
unsigned int numa_socket_id;
} __rte_cache_aligned;
enum ena_adapter_state {
ENA_ADAPTER_STATE_FREE = 0,
ENA_ADAPTER_STATE_INIT = 1,
ENA_ADAPTER_STATE_RUNNING = 2,
ENA_ADAPTER_STATE_STOPPED = 3,
ENA_ADAPTER_STATE_CONFIG = 4,
ENA_ADAPTER_STATE_CLOSED = 5,
};
struct ena_driver_stats {
rte_atomic64_t ierrors;
rte_atomic64_t oerrors;
rte_atomic64_t rx_nombuf;
u64 rx_drops;
};
struct ena_stats_dev {
u64 wd_expired;
u64 dev_start;
u64 dev_stop;
/*
 * Tx drops cannot be reported as a standard driver statistic, because the
 * DPDK rte_eth_stats structure does not provide an appropriate field for
 * that. As a workaround, they are published as an extended statistic.
 */
u64 tx_drops;
};
struct ena_stats_eni {
/*
* The number of packets shaped due to inbound aggregate BW
* allowance being exceeded
*/
uint64_t bw_in_allowance_exceeded;
/*
* The number of packets shaped due to outbound aggregate BW
* allowance being exceeded
*/
uint64_t bw_out_allowance_exceeded;
/* The number of packets shaped due to PPS allowance being exceeded */
uint64_t pps_allowance_exceeded;
/*
* The number of packets shaped due to connection tracking
* allowance being exceeded and leading to failure in establishment
* of new connections
*/
uint64_t conntrack_allowance_exceeded;
/*
* The number of packets shaped due to linklocal packet rate
* allowance being exceeded
*/
uint64_t linklocal_allowance_exceeded;
};
struct ena_offloads {
bool tso4_supported;
bool tx_csum_supported;
bool rx_csum_supported;
};
/* board specific private data structure */
struct ena_adapter {
/* OS defined structs */
struct rte_pci_device *pdev;
struct rte_eth_dev_data *rte_eth_dev_data;
struct rte_eth_dev *rte_dev;
struct ena_com_dev ena_dev __rte_cache_aligned;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
u32 max_tx_ring_size;
u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
u32 max_rx_ring_size;
u16 max_rx_sgl_size;
u32 max_num_io_queues;
2017-04-21 10:43:26 +00:00
u16 max_mtu;
struct ena_offloads offloads;
/* The admin queue isn't protected by a lock of its own and is used, among
 * others, to retrieve statistics from the device. As there is no guarantee
 * that the application won't try to get statistics from multiple threads, it
 * is safer to lock the queue to avoid admin queue failures (an illustrative
 * usage sketch follows this structure definition).
 */
rte_spinlock_t admin_lock;
int id_number;
char name[ENA_NAME_MAX_LEN];
u8 mac_addr[RTE_ETHER_ADDR_LEN];
void *regs;
void *dev_mem_base;
struct ena_driver_stats *drv_stats;
enum ena_adapter_state state;
uint64_t tx_supported_offloads;
uint64_t tx_selected_offloads;
uint64_t rx_supported_offloads;
uint64_t rx_selected_offloads;
bool link_status;
enum ena_regs_reset_reason_types reset_reason;
struct rte_timer timer_wd;
uint64_t timestamp_wd;
uint64_t keep_alive_timeout;
struct ena_stats_dev dev_stats;
struct ena_stats_eni eni_stats;
bool trigger_reset;
bool wd_state;
bool use_large_llq_hdr;
};
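/*
 * Minimal usage sketch for admin_lock, assuming a hypothetical helper name:
 * any admin-queue command issued on behalf of the application (e.g. a
 * statistics query) is wrapped with the spinlock described above.
 */
static inline void ena_admin_locked_example(struct ena_adapter *adapter)
{
	rte_spinlock_lock(&adapter->admin_lock);
	/* Issue the admin-queue command here, e.g. a device statistics read. */
	rte_spinlock_unlock(&adapter->admin_lock);
}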
#endif /* _ENA_ETHDEV_H_ */